Diffstat (limited to 'drivers')
58 files changed, 562 insertions, 205 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index c29c2c3ec0ad..b06f5f55ada9 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; + if (pr->apic_id == -1) + return -ENODEV; + status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) return -ENODEV; @@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device) } apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); - if (apic_id < 0) { + if (apic_id < 0) acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); - return -ENODEV; - } pr->apic_id = apic_id; cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d7d32c28829b..ad11ba4a412d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -206,13 +206,13 @@ unlock: spin_unlock_irqrestore(&ec->lock, flags); } -static int acpi_ec_sync_query(struct acpi_ec *ec); +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) { if (state & ACPI_EC_FLAG_SCI) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - return acpi_ec_sync_query(ec); + return acpi_ec_sync_query(ec, NULL); } return 0; } @@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void) EXPORT_SYMBOL(ec_get_handle); -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); - /* - * Clears stale _Q events that might have accumulated in the EC. + * Process _Q events that might have accumulated in the EC. * Run with locked ec mutex. */ static void acpi_ec_clear(struct acpi_ec *ec) @@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec) u8 value = 0; for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { - status = acpi_ec_query_unlocked(ec, &value); + status = acpi_ec_sync_query(ec, &value); if (status || !value) break; } @@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt) kfree(handler); } -static int acpi_ec_sync_query(struct acpi_ec *ec) +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) { u8 value = 0; int status; struct acpi_ec_query_handler *handler, *copy; - if ((status = acpi_ec_query_unlocked(ec, &value))) + + status = acpi_ec_query_unlocked(ec, &value); + if (data) + *data = value; + if (status) return status; + list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { /* have custom handler for this bit */ @@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt) if (!ec) return; mutex_lock(&ec->mutex); - acpi_ec_sync_query(ec); + acpi_ec_sync_query(ec, NULL); mutex_unlock(&ec->mutex); } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8986b9f22781..62ec61e8f84a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static struct workqueue_struct *deferred_wq; +static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /** * deferred_probe_work_func() - Retry probing devices in the active list. @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false; * This functions moves all devices from the pending list to the active * list and schedules the deferred probe workqueue to process them. It * should be called anytime a driver is successfully bound to a device. 
+ * + * Note, there is a race condition in multi-threaded probe. In the case where + * more than one device is probing at the same time, it is possible for one + * probe to complete successfully while another is about to defer. If the second + * depends on the first, then it will get put on the pending list after the + * trigger event has already occurred and will be stuck there. + * + * The atomic 'deferred_trigger_count' is used to determine if a successful + * trigger has occurred in the midst of probing a driver. If the trigger count + * changes in the midst of a probe, then deferred processing should be triggered + * again. */ static void driver_deferred_probe_trigger(void) { @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void) * into the active list so they can be retried by the workqueue */ mutex_lock(&deferred_probe_mutex); + atomic_inc(&deferred_trigger_count); list_splice_tail_init(&deferred_probe_pending_list, &deferred_probe_active_list); mutex_unlock(&deferred_probe_mutex); @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); static int really_probe(struct device *dev, struct device_driver *drv) { int ret = 0; + int local_trigger_count = atomic_read(&deferred_trigger_count); atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", @@ -310,6 +324,9 @@ probe_failed: /* Driver requested deferred probing */ dev_info(dev, "Driver %s requests probe deferral\n", drv->name); driver_deferred_probe_add(dev); + /* Did a trigger occur while probing? Need to re-trigger if yes */ + if (local_trigger_count != atomic_read(&deferred_trigger_count)) + driver_deferred_probe_trigger(); } else if (ret != -ENODEV && ret != -ENXIO) { /* driver matched but the probe failed */ printk(KERN_WARNING diff --git a/drivers/base/platform.c b/drivers/base/platform.c index e714709704e4..5b47210889e0 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -13,6 +13,7 @@ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> @@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) return -ENXIO; return dev->archdata.irqs[num]; #else - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); + struct resource *r; + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) + return of_irq_get(dev->dev.of_node, num); + + r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ?
r->start : -ENXIO; #endif diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57e823c44d2a..5163ec13429d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI]; static struct clock_event_device __percpu *arch_timer_evt; static bool arch_timer_use_virtual = true; +static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; /* @@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type, clk->features = CLOCK_EVT_FEAT_ONESHOT; if (type == ARCH_CP15_TIMER) { - clk->features |= CLOCK_EVT_FEAT_C3STOP; + if (arch_timer_c3stop) + clk->features |= CLOCK_EVT_FEAT_C3STOP; clk->name = "arch_sys_timer"; clk->rating = 450; clk->cpumask = cpumask_of(smp_processor_id()); @@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np) } } + arch_timer_c3stop = !of_property_read_bool(np, "always-on"); + arch_timer_register(); arch_timer_common_init(); } diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index ca81809d159d..7ce442148c3f 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c @@ -212,4 +212,9 @@ error_free: return ret; } -CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); +static void __init zevio_timer_init(struct device_node *node) +{ + BUG_ON(zevio_timer_add(node)); +} + +CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index d00e5d1abd25..5c4369b5d834 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, * Sets a new clock ratio. */ -static void longhaul_setstate(struct cpufreq_policy *policy, +static int longhaul_setstate(struct cpufreq_policy *policy, unsigned int table_index) { unsigned int mults_index; @@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy, /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) - return; + return -EINVAL; + speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) - return; + return -EINVAL; + /* Voltage transition before frequency transition? */ if (can_scale_voltage && longhaul_index < table_index) dir = 1; @@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy, freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; - cpufreq_freq_transition_begin(policy, &freqs); - pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: @@ -385,12 +385,14 @@ retry_loop: goto retry_loop; } } - /* Report true CPU frequency */ - cpufreq_freq_transition_end(policy, &freqs, 0); - if (!bm_timeout) + if (!bm_timeout) { printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); + return -EBUSY; + } + + return 0; } /* @@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy, unsigned int i; unsigned int dir = 0; u8 vid, current_vid; + int retval = 0; if (!can_scale_voltage) - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. 
Both @@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy, while (i != table_index) { vid = (longhaul_table[i].driver_data >> 8) & 0x1f; if (vid != current_vid) { - longhaul_setstate(policy, i); + retval = longhaul_setstate(policy, i); current_vid = vid; msleep(200); } @@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy, else i--; } - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); } + longhaul_index = table_index; - return 0; + return retval; } @@ -968,7 +972,15 @@ static void __exit longhaul_exit(void) for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = longhaul_table[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); longhaul_setstate(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); break; } } diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 49f120e1bc7b..78904e6ca4a0 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i) static int powernow_k6_target(struct cpufreq_policy *policy, unsigned int best_i) { - struct cpufreq_freqs freqs; if (clock_ratio[best_i].driver_data > max_multiplier) { printk(KERN_ERR PFX "invalid target frequency\n"); return -EINVAL; } - freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); - freqs.new = busfreq * clock_ratio[best_i].driver_data; - - cpufreq_freq_transition_begin(policy, &freqs); - powernow_k6_set_cpu_multiplier(best_i); - cpufreq_freq_transition_end(policy, &freqs, 0); - return 0; } @@ -227,9 +219,20 @@ have_busfreq: static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; - for (i = 0; i < 8; i++) { - if (i == max_multiplier) + + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == max_multiplier) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = clock_ratio[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); powernow_k6_target(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); + break; + } } return 0; } diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index f911645c3f6d..e61e224475ad 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) freqs.new = powernow_table[index].frequency; - cpufreq_freq_transition_begin(policy, &freqs); - /* Now do the magic poking into the MSRs. 
*/ if (have_a0 == 1) /* A0 errata 5 */ @@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) if (have_a0 == 1) local_irq_enable(); - cpufreq_freq_transition_end(policy, &freqs, 0); - return 0; } diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index a1ca3dd04a8e..0af618abebaf 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *table; struct cpu_data *data; unsigned int cpu = policy->cpu; + u64 transition_latency_hz; np = of_get_cpu_node(cpu, NULL); if (!np) @@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) for_each_cpu(i, per_cpu(cpu_mask, cpu)) per_cpu(cpu_data, i) = data; + transition_latency_hz = 12ULL * NSEC_PER_SEC; policy->cpuinfo.transition_latency = - (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq(); + do_div(transition_latency_hz, fsl_get_sys_freq()); + of_node_put(np); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e930d4fe29c7..1ef5ab9c9d51 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, plane->crtc = crtc; plane->fb = crtc->primary->fb; + drm_framebuffer_reference(plane->fb); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index c786cd4f457b..2a3ad24276f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, buffer->sgt = sgt; exynos_gem_obj->base.import_attach = attach; - DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, + DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, buffer->size); return &exynos_gem_obj->base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index eb73e3bf2a0c..4ac438187568 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsi->reg_base) { + if (IS_ERR(dsi->reg_base)) { dev_err(&pdev->dev, "failed to remap io region\n"); - return -EADDRNOTAVAIL; + return PTR_ERR(dsi->reg_base); } dsi->phy = devm_phy_get(&pdev->dev, "dsim"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 7afead9c3f30..852f2dadaebd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) win_data->enabled = true; - DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); + DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); if (ctx->vblank_on) schedule_work(&ctx->work); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ab5e93c30aa2..62a5c3627b90 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full) /* Full ppgtt disabled 
by default for now due to issues. */ if (full) - return false; /* HAS_PPGTT(dev) */ + return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2); else return HAS_ALIASING_PPGTT(dev); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7753249b3a95..f98ba4e6e70b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { - WARN_ONCE(hpd[i] & hotplug_trigger && - dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, - "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", - hotplug_trigger, i, hpd[i]); + if (hpd[i] & hotplug_trigger && + dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { + /* + * On GMCH platforms the interrupt mask bits only + * prevent irq generation, not the setting of the + * hotplug bits itself. So only WARN about unexpected + * interrupts on saner platforms. + */ + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), + "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", + hotplug_trigger, i, hpd[i]); + + continue; + } if (!(hpd[i] & hotplug_trigger) || dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9f5b18d9d885..c77af69c2d8f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -827,6 +827,7 @@ enum punit_power_well { # define MI_FLUSH_ENABLE (1 << 12) # define ASYNC_FLIP_PERF_DISABLE (1 << 14) # define MODE_IDLE (1 << 9) +# define STOP_RING (1 << 8) #define GEN6_GT_MODE 0x20d0 #define GEN7_GT_MODE 0x7008 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dae976f51d83..69bcc42a0e44 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(pipe_src_w); PIPE_CONF_CHECK_I(pipe_src_h); - PIPE_CONF_CHECK_I(gmch_pfit.control); - /* pfit ratios are autocomputed by the hw on gen4+ */ - if (INTEL_INFO(dev)->gen < 4) - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); - PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + /* + * FIXME: BIOS likes to set up a cloned config with lvds+external + * screen. Since we don't yet re-compute the pipe config when moving + * just the lvds port away to another pipe the sw tracking won't match. + * + * Proper atomic modesets with recomputed global state will fix this. + * Until then just don't check gmch state for inherited modes. 
+ */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { + PIPE_CONF_CHECK_I(gmch_pfit.control); + /* pfit ratios are autocomputed by the hw on gen4+ */ + if (INTEL_INFO(dev)->gen < 4) + PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); + PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + } + PIPE_CONF_CHECK_I(pch_pfit.enabled); if (current_config->pch_pfit.enabled) { PIPE_CONF_CHECK_I(pch_pfit.pos); @@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) base.head) { memset(&crtc->config, 0, sizeof(crtc->config)); + crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; + crtc->active = dev_priv->display.get_pipe_config(crtc, &crtc->config); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d2a55884ad52..dfa85289f28f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, { struct drm_connector *connector = &intel_connector->base; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = NULL; bool has_dpcd; @@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; + /* The VDD bit needs a power domain reference, so if the bit is already + * enabled when we boot, grab this reference. */ + if (edp_have_panel_vdd(intel_dp)) { + enum intel_display_power_domain power_domain; + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + } + /* Cache DPCD and EDID for edp. */ intel_edp_panel_vdd_on(intel_dp); has_dpcd = intel_dp_get_dpcd(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0542de982260..328b1a70264b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -236,7 +236,8 @@ struct intel_crtc_config { * tracked with quirk flags so that fastboot and state checker can act * accordingly. 
*/ -#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ unsigned long quirks; /* User requested mode, only valid as a starting point to diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index b4d44e62f0c7..fce4a0d93c0b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); + if (intel_fb && + (sizes->fb_width > intel_fb->base.width || + sizes->fb_height > intel_fb->base.height)) { + DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," + " releasing it\n", + intel_fb->base.width, intel_fb->base.height, + sizes->fb_width, sizes->fb_height); + drm_framebuffer_unreference(&intel_fb->base); + intel_fb = ifbdev->fb = NULL; + } if (!intel_fb || WARN_ON(!intel_fb->obj)) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b0413e190625..157267aa3561 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) } } -static int hdmi_portclock_limit(struct intel_hdmi *hdmi) +static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (!hdmi->has_hdmi_sink || IS_G4X(dev)) + if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -837,7 +837,8 @@ static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) + if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), + true)) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_LOW; @@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; - int portclock_limit = hdmi_portclock_limit(intel_hdmi); + int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); int desired_bpp; if (intel_hdmi->color_range_auto) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6bc68bdcf433..79fb4cc2137c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) I915_WRITE(HWS_PGA, addr); } -static int init_ring_common(struct intel_ring_buffer *ring) +static bool stop_ring(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj = ring->obj; - int ret = 0; - u32 head; + struct drm_i915_private *dev_priv = to_i915(ring->dev); - gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + if (!IS_GEN2(ring->dev)) { + I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); + if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s :timed out 
trying to stop ring\n", ring->name); + return false; + } + } - /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) - DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); - if (I915_NEED_GFX_HWS(dev)) - intel_ring_setup_status_page(ring); - else - ring_setup_phys_status_page(ring); + if (!IS_GEN2(ring->dev)) { + (void)I915_READ_CTL(ring); + I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); + } - head = I915_READ_HEAD(ring) & HEAD_ADDR; + return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; +} - /* G45 ring initialization fails to reset head to zero */ - if (head != 0) { +static int init_ring_common(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj = ring->obj; + int ret = 0; + + gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + + if (!stop_ring(ring)) { + /* G45 ring initialization often fails to reset head to zero */ DRM_DEBUG_KMS("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_TAIL(ring), I915_READ_START(ring)); - I915_WRITE_HEAD(ring, 0); - - if (I915_READ_HEAD(ring) & HEAD_ADDR) { + if (!stop_ring(ring)) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); + ret = -EIO; + goto out; } } + if (I915_NEED_GFX_HWS(dev)) + intel_ring_setup_status_page(ring); + else + ring_setup_phys_status_page(ring); + /* Initialize the ring. 
This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 270a6a973438..2b91c4b4d34b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -34,6 +34,7 @@ struct intel_hw_status_page { #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) +#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 3e6c0f3ed592..ef9957dbac94 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc) MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); } else { /* disable cursor: */ - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), - MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB)); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), + mdp4_kms->blank_cursor_iova); } /* and drop the iova ref + obj rev when done scanning out: */ @@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, if (old_bo) { /* drop our previous reference: */ - msm_gem_put_iova(old_bo, mdp4_kms->id); - drm_gem_object_unreference_unlocked(old_bo); + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); } - crtc_flush(crtc); request_pending(crtc, PENDING_CURSOR); return 0; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index c740ccd1cc67..8edd531cb621 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c @@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) VERB("status=%08x", status); + mdp_dispatch_irqs(mdp_kms, status); + for (id = 0; id < priv->num_crtcs; id++) if (status & mdp4_crtc_vblank(priv->crtcs[id])) drm_handle_vblank(dev, id); - mdp_dispatch_irqs(mdp_kms, status); - return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 272e707c9487..0bb4faa17523 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + if (mdp4_kms->blank_cursor_iova) + msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); + if (mdp4_kms->blank_cursor_bo) + drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); kfree(mdp4_kms); } @@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + mutex_lock(&dev->struct_mutex); + mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(mdp4_kms->blank_cursor_bo)) { + ret = PTR_ERR(mdp4_kms->blank_cursor_bo); + dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); + mdp4_kms->blank_cursor_bo = NULL; + goto fail; + } + + ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, + &mdp4_kms->blank_cursor_iova); + if (ret) { + dev_err(dev->dev, "could not pin 
blank-cursor bo: %d\n", ret); + goto fail; + } + return kms; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 66a4d31aec80..715520c54cde 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -44,6 +44,10 @@ struct mdp4_kms { struct clk *lut_clk; struct mdp_irq error_handler; + + /* empty/blank cursor bo to use when cursor is "disabled" */ + struct drm_gem_object *blank_cursor_bo; + uint32_t blank_cursor_iova; }; #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 353d494a497f..f2b985bc2adf 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) VERB("status=%08x", status); + mdp_dispatch_irqs(mdp_kms, status); + for (id = 0; id < priv->num_crtcs; id++) if (status & mdp5_crtc_vblank(priv->crtcs[id])) drm_handle_vblank(dev, id); - - mdp_dispatch_irqs(mdp_kms, status); } irqreturn_t mdp5_irq(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 6c6d7d4c9b4e..a752ab83b810 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dma_addr_t paddr; int ret, size; - /* only doing ARGB32 since this is what is needed to alpha-blend - * with video overlays: - */ sizes->surface_bpp = 32; - sizes->surface_depth = 32; + sizes->surface_depth = 24; DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3da8264d3039..bb8026daebc9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj) if (iommu_present(&platform_bus_type)) drm_gem_put_pages(obj, msm_obj->pages, true, false); - else + else { drm_mm_remove_node(msm_obj->vram_node); + drm_free_large(msm_obj->pages); + } msm_obj->pages = NULL; } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 36c717af6cf9..edb871d7d395 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) struct drm_device *drm = crtc->dev; struct drm_plane *plane; - list_for_each_entry(plane, &drm->mode_config.plane_list, head) { + drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { if (plane->crtc == crtc) { tegra_plane_disable(plane); plane->crtc = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 931490b9cfed..87df0b3674fd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdSurfaceDMA dma; } *cmd; int ret; + SVGA3dCmdSurfaceDMASuffix *suffix; + uint32_t bo_size; cmd = container_of(header, struct vmw_dma_cmd, header); + suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + + header->size - sizeof(*suffix)); + + /* Make sure device and verifier stays in sync. 
*/ + if (unlikely(suffix->suffixSize != sizeof(*suffix))) { + DRM_ERROR("Invalid DMA suffix size.\n"); + return -EINVAL; + } + ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->dma.guest.ptr, &vmw_bo); if (unlikely(ret != 0)) return ret; + /* Make sure DMA doesn't cross BO boundaries. */ + bo_size = vmw_bo->base.num_pages * PAGE_SIZE; + if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { + DRM_ERROR("Invalid DMA offset.\n"); + return -EINVAL; + } + + bo_size -= cmd->dma.guest.ptr.offset; + if (unlikely(suffix->maximumOffset > bo_size)) + suffix->maximumOffset = bo_size; + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->dma.host.sid, NULL); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 6d02e3b06375..d76f0b70c6e0 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) if (cpu_has_tjmax(c)) dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { - val = (eax >> 16) & 0x7f; + val = (eax >> 16) & 0xff; /* * If the TjMax is not plausible, an assumption * will be used */ - if (val >= 85) { + if (val) { dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig index d4e8983fba53..23f38cf2c5cd 100644 --- a/drivers/infiniband/hw/cxgb4/Kconfig +++ b/drivers/infiniband/hw/cxgb4/Kconfig @@ -1,10 +1,10 @@ config INFINIBAND_CXGB4 - tristate "Chelsio T4 RDMA Driver" + tristate "Chelsio T4/T5 RDMA Driver" depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) select GENERIC_ALLOCATOR ---help--- - This is an iWARP/RDMA driver for the Chelsio T4 1GbE and - 10GbE adapters. + This is an iWARP/RDMA driver for the Chelsio T4 and T5 + 1GbE, 10GbE adapters and T5 40GbE adapter. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. 
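The vmwgfx_execbuf.c change above is a classic validate-then-clamp guard for guest-controlled DMA commands: reject a command whose trailing suffix is malformed, reject an offset that already lies past the buffer object, and clamp the remaining extent so the device can never reach beyond the BO. A minimal sketch of that pattern follows; the struct and function names here are illustrative stand-ins, not the actual SVGA/vmwgfx definitions.

#include <stdint.h>
#include <errno.h>

/* Hypothetical stand-in for SVGA3dCmdSurfaceDMASuffix. */
struct dma_suffix {
	uint32_t suffix_size;
	uint32_t maximum_offset;
};

/* Returns 0 if the command is safe (possibly after clamping), -EINVAL otherwise. */
static int clamp_guest_dma(struct dma_suffix *suffix, uint64_t bo_size,
			   uint64_t offset)
{
	/* The device and the verifier must agree on the command layout. */
	if (suffix->suffix_size != sizeof(*suffix))
		return -EINVAL;

	/* An offset already past the end of the buffer object is invalid. */
	if (offset > bo_size)
		return -EINVAL;

	/*
	 * A too-large maximum offset is clamped rather than rejected, so a
	 * sloppy guest keeps working but can no longer reach past the BO.
	 */
	bo_size -= offset;
	if (suffix->maximum_offset > bo_size)
		suffix->maximum_offset = (uint32_t)bo_size;

	return 0;
}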
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 185452abf32c..1f863a96a480 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN(1); + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { + opt2 |= T5_OPT_2_VALID; + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + } t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { @@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status) static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); - state_set(&ep->com, ABORTING); + __state_set(&ep->com, ABORTING); set_bit(ABORT_CONN, &ep->com.history); return send_abort(ep, skb, gfp); } @@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) return credits; } -static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) +static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; @@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; int err; + int disconnect = 0; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); @@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * will abort the connection. */ if (stop_ep_timer(ep)) - return; + return 0; /* * If we get more than the supported amount of private data @@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * if we don't even have the mpa message, then bail. */ if (ep->mpa_pkt_len < sizeof(*mpa)) - return; + return 0; mpa = (struct mpa_message *) ep->mpa_pkt; /* Validate MPA header. */ @@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * We'll continue process when more data arrives. 
*/ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) - return; + return 0; if (mpa->flags & MPA_REJECT) { err = -ECONNREFUSED; @@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_NOMATCH_RTR; attrs.next_state = C4IW_QP_STATE_TERMINATE; + attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; + disconnect = 1; goto out; } @@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_INSUFF_IRD; attrs.next_state = C4IW_QP_STATE_TERMINATE; + attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; + disconnect = 1; goto out; } goto out; @@ -1366,7 +1375,7 @@ err: send_abort(ep, skb, GFP_KERNEL); out: connect_reply_upcall(ep, err); - return; + return disconnect; } static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) @@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int tid = GET_TID(hdr); struct tid_info *t = dev->rdev.lldi.tids; __u8 status = hdr->status; + int disconnect = 0; ep = lookup_tid(t, tid); if (!ep) @@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) switch (ep->com.state) { case MPA_REQ_SENT: ep->rcv_seq += dlen; - process_mpa_reply(ep, skb); + disconnect = process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: ep->rcv_seq += dlen; @@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) ep->com.state, ep->hwtid, status); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + disconnect = 1; break; } default: break; } mutex_unlock(&ep->com.mutex); + if (disconnect) + c4iw_ep_disconnect(ep, 0, GFP_KERNEL); return 0; } @@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN(1); } + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { + opt2 |= T5_OPT_2_VALID; + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + } rpl = cplhdr(skb); INIT_TP_WR(rpl, ep->hwtid); @@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep) __func__, ep, ep->hwtid, ep->com.state); abort = 0; } - mutex_unlock(&ep->com.mutex); if (abort) abort_connection(ep, NULL, GFP_KERNEL); + mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7b8c5806a09d..7474b490760a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -435,6 +435,7 @@ struct c4iw_qp_attributes { u8 ecode; u16 sq_db_inc; u16 rq_db_inc; + u8 send_term; }; struct c4iw_qp { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 7b5114cb486f..086f62f5dc9e 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, qhp->attr.layer_etype = attrs->layer_etype; qhp->attr.ecode = attrs->ecode; ep = qhp->ep; - disconnect = 1; - c4iw_get_ep(&qhp->ep->com); - if (!internal) + if (!internal) { + c4iw_get_ep(&qhp->ep->com); terminate = 1; - else { + disconnect = 1; + } else { + 
terminate = qhp->attr.send_term; ret = rdma_fini(rhp, qhp, ep); if (ret) goto err; @@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, /* * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for * ringing the queue db when we're in DB_FULL mode. + * Only allow this on T4 devices. */ attrs.sq_db_inc = attr->sq_psn; attrs.rq_db_inc = attr->rq_psn; mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; + if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && + (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) + return -EINVAL; return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); } diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index dc193c292671..6121ca08fe58 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -836,4 +836,18 @@ struct ulptx_idata { #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) +enum { /* TCP congestion control algorithms */ + CONG_ALG_RENO, + CONG_ALG_TAHOE, + CONG_ALG_NEWRENO, + CONG_ALG_HIGHSPEED +}; + +#define S_CONG_CNTRL 14 +#define M_CONG_CNTRL 0x3 +#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) +#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) + +#define T5_OPT_2_VALID (1 << 31) + #endif /* _T4FW_RI_API_H_ */ diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 41be897df8d5..3899ba7821c5 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -41,6 +41,7 @@ #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) +#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) #define ARMADA_375_PPI_CAUSE (0x10) @@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, struct msi_desc *desc) { struct msi_msg msg; - irq_hw_number_t hwirq; - int virq; + int virq, hwirq; hwirq = armada_370_xp_alloc_msi(); if (hwirq < 0) @@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); + unsigned long hwirq = d->hwirq; + irq_dispose_mapping(irq); - armada_370_xp_free_msi(d->hwirq); + armada_370_xp_free_msi(hwirq); +} + +static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, + int nvec, int type) +{ + /* We support MSI, but not MSI-X */ + if (type == PCI_CAP_ID_MSI) + return 0; + return -EINVAL; } static struct irq_chip armada_370_xp_msi_irq_chip = { @@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node, msi_chip->setup_irq = armada_370_xp_setup_msi_irq; msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; + msi_chip->check_device = armada_370_xp_check_msi_device; msi_chip->of_node = node; armada_370_xp_msi_domain = @@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock); static int armada_xp_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned long reg; - unsigned long new_mask = 0; - unsigned long online_mask = 0; - unsigned long count = 0; irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long reg, mask; int cpu; - for_each_cpu(cpu, mask_val) { - new_mask |= 1 << cpu_logical_map(cpu); - count++; - } - - /* - * Forbid mutlicore interrupt affinity - * This is required since the 
MPIC HW doesn't limit - * several CPUs from acknowledging the same interrupt. - */ - if (count > 1) - return -EINVAL; - - for_each_cpu(cpu, cpu_online_mask) - online_mask |= 1 << cpu_logical_map(cpu); + /* Select a single core from the affinity mask which is online */ + cpu = cpumask_any_and(mask_val, cpu_online_mask); + mask = 1UL << cpu_logical_map(cpu); raw_spin_lock(&irq_controller_lock); - reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - reg = (reg & (~online_mask)) | new_mask; + reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - raw_spin_unlock(&irq_controller_lock); return 0; @@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, #ifdef CONFIG_SMP armada_xp_mpic_smp_cpu_init(); - - /* - * Set the default affinity from all CPUs to the boot cpu. - * This is required since the MPIC doesn't limit several CPUs - * from acknowledging the same interrupt. - */ - cpumask_clear(irq_default_affinity); - cpumask_set_cpu(smp_processor_id(), irq_default_affinity); - #endif armada_370_xp_msi_init(node, main_int_res.start); diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index fc817d28d1fe..3d15d16a7088 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node) int i, size, max, reserved = 0, entry; const __be32 *irqsr; - cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); + cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return -ENOMEM; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1bf4a71919ec..9380be7b1895 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) } else { inc_hit_counter(cache, bio); + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 53728be84dee..13abade76ad9 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -232,6 +232,13 @@ struct thin_c { struct bio_list deferred_bio_list; struct bio_list retry_on_resume_list; struct rb_root sort_bio_list; /* sorted list of deferred bios */ + + /* + * Ensures the thin is not destroyed until the worker has finished + * iterating the active_thins list. + */ + atomic_t refcount; + struct completion can_destroy; }; /*----------------------------------------------------------------*/ @@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc) blk_finish_plug(&plug); } +static void thin_get(struct thin_c *tc); +static void thin_put(struct thin_c *tc); + +/* + * We can't hold rcu_read_lock() around code that can block. So we + * find a thin with the rcu lock held; bump a refcount; then drop + * the lock. 
+ */ +static struct thin_c *get_first_thin(struct pool *pool) +{ + struct thin_c *tc = NULL; + + rcu_read_lock(); + if (!list_empty(&pool->active_thins)) { + tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); + thin_get(tc); + } + rcu_read_unlock(); + + return tc; +} + +static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) +{ + struct thin_c *old_tc = tc; + + rcu_read_lock(); + list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { + thin_get(tc); + thin_put(old_tc); + rcu_read_unlock(); + return tc; + } + thin_put(old_tc); + rcu_read_unlock(); + + return NULL; +} + static void process_deferred_bios(struct pool *pool) { unsigned long flags; @@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool) struct bio_list bios; struct thin_c *tc; - rcu_read_lock(); - list_for_each_entry_rcu(tc, &pool->active_thins, list) + tc = get_first_thin(pool); + while (tc) { process_thin_deferred_bios(tc); - rcu_read_unlock(); + tc = get_next_thin(pool, tc); + } /* * If there are any deferred flush bios, we must commit @@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) { struct noflush_work w; - INIT_WORK(&w.worker, fn); + INIT_WORK_ONSTACK(&w.worker, fn); w.tc = tc; atomic_set(&w.complete, 0); init_waitqueue_head(&w.wait); @@ -3061,11 +3108,25 @@ static struct target_type pool_target = { /*---------------------------------------------------------------- * Thin target methods *--------------------------------------------------------------*/ +static void thin_get(struct thin_c *tc) +{ + atomic_inc(&tc->refcount); +} + +static void thin_put(struct thin_c *tc) +{ + if (atomic_dec_and_test(&tc->refcount)) + complete(&tc->can_destroy); +} + static void thin_dtr(struct dm_target *ti) { struct thin_c *tc = ti->private; unsigned long flags; + thin_put(tc); + wait_for_completion(&tc->can_destroy); + spin_lock_irqsave(&tc->pool->lock, flags); list_del_rcu(&tc->list); spin_unlock_irqrestore(&tc->pool->lock, flags); @@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) struct thin_c *tc; struct dm_dev *pool_dev, *origin_dev; struct mapped_device *pool_md; + unsigned long flags; mutex_lock(&dm_thin_pool_table.mutex); @@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) mutex_unlock(&dm_thin_pool_table.mutex); - spin_lock(&tc->pool->lock); + atomic_set(&tc->refcount, 1); + init_completion(&tc->can_destroy); + + spin_lock_irqsave(&tc->pool->lock, flags); list_add_tail_rcu(&tc->list, &tc->pool->active_thins); - spin_unlock(&tc->pool->lock); + spin_unlock_irqrestore(&tc->pool->lock, flags); /* * This synchronize_rcu() call is needed here otherwise we risk a * wake_worker() call finding no bios to process (because the newly diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 796007a5e0e1..7a7bab8947ae 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -330,15 +330,17 @@ test_block_hash: return r; } } - todo = 1 << v->data_dev_block_bits; - while (io->iter.bi_size) { + do { u8 *page; + unsigned len; struct bio_vec bv = bio_iter_iovec(bio, io->iter); page = kmap_atomic(bv.bv_page); - r = crypto_shash_update(desc, page + bv.bv_offset, - bv.bv_len); + len = bv.bv_len; + if (likely(len >= todo)) + len = todo; + r = crypto_shash_update(desc, page + bv.bv_offset, len); kunmap_atomic(page); if (r < 0) { @@ -346,8 +348,9 @@ test_block_hash: return r; } - bio_advance_iter(bio, &io->iter, bv.bv_len); - } + 
bio_advance_iter(bio, &io->iter, len); + todo -= len; + } while (todo); if (!v->version) { r = crypto_shash_update(desc, v->salt, v->salt_size); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 9bcf2cf19357..5aeb89411350 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) memset(r, 0, sizeof(*r)); /* - * Get optional "interrupts-names" property to add a name + * Get optional "interrupt-names" property to add a name * to the resource. */ of_property_read_string_index(dev, "interrupt-names", index, @@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) EXPORT_SYMBOL_GPL(of_irq_to_resource); /** + * of_irq_get - Decode a node's IRQ and return it as a Linux irq number + * @dev: pointer to device tree node + * @index: zero-based index of the irq + * + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain + * is not yet created. + * + */ +int of_irq_get(struct device_node *dev, int index) +{ + int rc; + struct of_phandle_args oirq; + struct irq_domain *domain; + + rc = of_irq_parse_one(dev, index, &oirq); + if (rc) + return rc; + + domain = irq_find_host(oirq.np); + if (!domain) + return -EPROBE_DEFER; + + return irq_create_of_mapping(&oirq); +} + +/** * of_irq_count - Count the number of IRQs a node uses * @dev: pointer to device tree node */ diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 404d1daebefa..bd47fbc53dc9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np, rc = of_address_to_resource(np, i, res); WARN_ON(rc); } - WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); + if (of_irq_to_resource_table(np, res, num_irq) != num_irq) + pr_debug("not all legacy IRQ resources mapped for %s\n", + np->name); } dev->dev.of_node = of_node_get(np); diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index ae4450070503..fe70b86bcffb 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> +#include <linux/of_platform.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void) } } +static void __init of_selftest_platform_populate(void) +{ + int irq; + struct device_node *np; + struct platform_device *pdev; + + np = of_find_node_by_path("/testcase-data"); + of_platform_populate(np, of_default_bus_match_table, NULL, NULL); + + /* Test that a missing irq domain returns -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device1"); + pdev = of_find_device_by_node(np); + if (!pdev) + selftest(0, "device 1 creation failed\n"); + irq = platform_get_irq(pdev, 0); + if (irq != -EPROBE_DEFER) + selftest(0, "device deferred probe failed - %d\n", irq); + + /* Test that a parsing failure does not return -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device2"); + pdev = of_find_device_by_node(np); + if (!pdev) + selftest(0, "device 2 creation failed\n"); + irq = platform_get_irq(pdev, 0); + if (irq >= 0 || irq == -EPROBE_DEFER) + selftest(0, "device parsing error failed - %d\n", irq); + + selftest(1, "passed"); +} + static int __init of_selftest(void) { struct device_node *np; @@ -445,6 +476,7 @@ static int __init of_selftest(void) of_selftest_parse_interrupts(); 
of_selftest_parse_interrupts_extended(); of_selftest_match_node(); + of_selftest_platform_populate(); pr_info("end of selftest - %i passed, %i failed\n", selftest_results.passed, selftest_results.failed); return 0; diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi index c843720bd3e5..da4695f60351 100644 --- a/drivers/of/testcase-data/tests-interrupts.dtsi +++ b/drivers/of/testcase-data/tests-interrupts.dtsi @@ -54,5 +54,18 @@ <&test_intmap1 1 2>; }; }; + + testcase-device1 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc0>; + interrupts = <1>; + }; + + testcase-device2 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc2>; + interrupts = <1>; /* invalid specifier - too short */ + }; }; + }; diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c index 92ed4b2e3c07..c862f9c0e9ce 100644 --- a/drivers/pinctrl/pinctrl-as3722.c +++ b/drivers/pinctrl/pinctrl-as3722.c @@ -64,7 +64,6 @@ struct as3722_pin_function { }; struct as3722_gpio_pin_control { - bool enable_gpio_invert; unsigned mode_prop; int io_function; }; @@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev, return mode; } - if (as_pci->gpio_control[offset].enable_gpio_invert) - mode |= AS3722_GPIO_INV; - - return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode); + return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset), + AS3722_GPIO_MODE_MASK, mode); } static const struct pinmux_ops as3722_pinmux_ops = { @@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset, { struct as3722_pctrl_info *as_pci = to_as_pci(chip); struct as3722 *as3722 = as_pci->as3722; - int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; + int en_invert; u32 val; int ret; + ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val); + if (ret < 0) { + dev_err(as_pci->dev, + "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret); + return; + } + en_invert = !!(val & AS3722_GPIO_INV); + if (value) val = (en_invert) ? 
 			0 : AS3722_GPIOn_SIGNAL(offset);
 	else
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 81075f2a1d3f..2960557bfed9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
 		unsigned pin_pos)
 {
+	struct pcs_soc_data *pcs_soc = &pcs->socdata;
 	struct pinctrl_pin_desc *pin;
 	struct pcs_name *pn;
 	int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
 		return -ENOMEM;
 	}
 
+	if (pcs_soc->irq_enable_mask) {
+		unsigned val;
+
+		val = pcs->read(pcs->base + offset);
+		if (val & pcs_soc->irq_enable_mask) {
+			dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+				(unsigned long)pcs->res->start + offset, val);
+			val &= ~pcs_soc->irq_enable_mask;
+			pcs->write(val, pcs->base + offset);
+		}
+	}
+
 	pin = &pcs->pins.pa[i];
 	pn = &pcs->names[i];
 	sprintf(pn->name, "%lx.%d",
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c5e0f6973a3b..26ca6855f478 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
 	 */
 	for (i = 0; i < state->pinfuncgrpcnt; i++) {
 		const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
-		unsigned int port = pfg->port;
 		unsigned int mode = pfg->mode;
-		int j;
+		int j, port = pfg->port;
 
 		/*
 		 * Skip pin groups which are always mapped and don't need
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 48093719167a..f5cd3f961808 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 		FN_MSIOF0_SCK_B, 0,
 		/* IP5_23_21 [3] */
 		FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
-		FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
-		FN_IERX_C, 0,
+		FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
 		/* IP5_20_18 [3] */
 		FN_WE0_N, FN_IECLK, FN_CAN_CLK, FN_VI2_VSYNC_N,
 		FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5186d70c49d4..7868bf3a0f91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
 		/* SEL_SCIF3 [2] */
 		FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
 		/* SEL_IEB [2] */
-		FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+		FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
 		/* SEL_MMC [1] */
 		FN_SEL_MMC_0, FN_SEL_MMC_1,
 		/* SEL_SCIF5 [1] */
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9f611cbbc294..c31aa07b3ba5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
 	struct acpi_device *acpi_dev;
 	acpi_handle handle;
-	struct acpi_buffer buffer;
-	int ret;
+	int ret = 0;
 
 	pnp_dbg(&dev->dev, "set resources\n");
 
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 	if (WARN_ON_ONCE(acpi_dev != dev->data))
 		dev->data = acpi_dev;
 
-	ret = pnpacpi_build_resource_template(dev, &buffer);
-	if (ret)
-		return ret;
-	ret = pnpacpi_encode_resources(dev, &buffer);
-	if (ret) {
+	if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+		struct acpi_buffer buffer;
+
+		ret = pnpacpi_build_resource_template(dev, &buffer);
+		if (ret)
+			return ret;
+
+		ret = pnpacpi_encode_resources(dev, &buffer);
+		if (!ret) {
+			acpi_status status;
+
+			status = acpi_set_current_resources(handle, &buffer);
+			if (ACPI_FAILURE(status))
+				ret = -EIO;
+		}
 		kfree(buffer.pointer);
-		return ret;
 	}
-	if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
-		ret = -EINVAL;
-	else if (acpi_bus_power_manageable(handle))
+	if (!ret && acpi_bus_power_manageable(handle))
 		ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
-	kfree(buffer.pointer);
+
 	return ret;
 }
 
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
 	struct acpi_device *acpi_dev;
 	acpi_handle handle;
-	int ret;
+	acpi_status status;
 
 	dev_dbg(&dev->dev, "disable resources\n");
 
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 	}
 
 	/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
-	ret = 0;
 	if (acpi_bus_power_manageable(handle))
 		acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
-	/* continue even if acpi_bus_set_power() fails */
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
-		ret = -ENODEV;
-	return ret;
+
+	/* continue even if acpi_bus_set_power() fails */
+	status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+		return -ENODEV;
+
+	return 0;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 3736bc408adb..ebf0d6710b5a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 }
 #endif
 
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
 /* Device IDs of parts that have 32KB MCH space */
 static const unsigned int mch_quirk_devices[] = {
 	0x0154,	/* Ivy Bridge */
@@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = {
 #ifdef CONFIG_AMD_NB
 	{"PNP0c01", quirk_amd_mmconfig_area},
 #endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
 	{"PNP0c02", quirk_intel_mch},
 #endif
 	{""}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9f0ea6cb6922..e3bf885f4a6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
-	do {
+	static int ntsm_unsupported;
+
+	while (true) {
 		memset(sei, 0, sizeof(*sei));
 		sei->request.length = 0x0010;
 		sei->request.code = 0x000e;
-		sei->ntsm = ntsm;
+		if (!ntsm_unsupported)
+			sei->ntsm = ntsm;
 
 		if (chsc(sei))
 			break;
 
 		if (sei->response.code != 0x0001) {
-			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
-				      sei->response.code);
+			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+				      sei->response.code, sei->ntsm);
+
+			if (sei->response.code == 3 && sei->ntsm) {
+				/* Fallback for old firmware. */
+				ntsm_unsupported = 1;
+				continue;
+			}
 			break;
 		}
 
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
 			break;
 		}
-	} while (sei->u.nt0_area.flags & 0x80);
+
+		if (!(sei->u.nt0_area.flags & 0x80))
+			break;
+	}
 }
 
 /*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7f0af4fcc001..6fd7d40b2c4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	mpt2sas_base_free_resources(ioc);
 	pci_save_state(pdev);
-	pci_disable_device(pdev);
 	pci_set_power_state(pdev, device_state);
 	return 0;
 }
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 16bfd50cd3fe..db3b494e5926 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 
 		vscsi->affinity_hint_set = true;
 	} else {
-		for (i = 0; i < vscsi->num_queues; i++)
+		for (i = 0; i < vscsi->num_queues; i++) {
+			if (!vscsi->req_vqs[i].vq)
+				continue;
+
 			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+		}
 
 		vscsi->affinity_hint_set = false;
 	}