author     Jani Nikula <jani.nikula@intel.com>       2015-06-18 13:06:16 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2015-06-22 16:03:42 +0300
commit     77913b39addfaa836929815515ff55cea1142b66 (patch)
tree       704989b6f794f8a39e557b1b9f361ab6025123c9 /drivers/gpu/drm/i915
parent     10b0e9e904c409be8e2476058d9b19a6b37d619e (diff)
download   linux-77913b39addfaa836929815515ff55cea1142b66.tar.xz
drm/i915: move generic hotplug code into new intel_hotplug.c file
We have enough generic hotplug functions sprinkled all over i915_irq.c
to warrant moving them to a file of their own. This should further
underline the distinction between the generic code in the new file and
the platform-specific hotplug and irq code that remains in i915_irq.c.

Add a new intel_hpd_init_work() to keep the work functions static, and
rename get_port_from_pin() to intel_hpd_pin_to_port() while increasing
its visibility, but keep everything else the same.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
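
For a quick overview before the full diff: the cross-file interface that intel_hotplug.c now exports is declared in i915_drv.h (excerpted from the diff below), while intel_hpd_init_work() is what lets the work functions themselves stay static inside the new file:

    /* intel_hotplug.c */
    void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
    void intel_hpd_init(struct drm_i915_private *dev_priv);
    void intel_hpd_init_work(struct drm_i915_private *dev_priv);
    void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
    enum port intel_hpd_pin_to_port(enum hpd_pin pin);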
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile        |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c      |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      | 409
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 452
5 files changed, 464 insertions, 422 deletions
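
To illustrate the split the commit message describes, here is a minimal sketch (not part of this patch) of how a platform-specific PCH irq path in i915_irq.c feeds the generic handler that now lives in intel_hotplug.c; hotplug_trigger, dig_hotplug_reg and hpd are assumed locals/tables of the surrounding platform handler, not something this patch introduces:

    /* Sketch only: platform code decodes the hardware trigger bits into
     * generic pin masks, then hands them to the generic handler exported
     * by intel_hotplug.c. */
    u32 pin_mask, long_mask;

    pch_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                     dig_hotplug_reg, hpd);
    intel_hpd_irq_handler(dev, pin_mask, long_mask);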
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b7ddf48e1d75..de2196543367 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -34,6 +34,7 @@ i915-y += i915_cmd_parser.o \ i915_gpu_error.o \ i915_irq.o \ i915_trace_points.o \ + intel_hotplug.o \ intel_lrc.o \ intel_ringbuffer.o \ intel_uncore.o diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 78ef0bb53c36..0ec57bad454f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -541,21 +541,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return true; } -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) -{ - spin_lock_irq(&dev_priv->irq_lock); - - dev_priv->hotplug.long_port_mask = 0; - dev_priv->hotplug.short_port_mask = 0; - dev_priv->hotplug.event_bits = 0; - - spin_unlock_irq(&dev_priv->irq_lock); - - cancel_work_sync(&dev_priv->hotplug.dig_port_work); - cancel_work_sync(&dev_priv->hotplug.hotplug_work); - cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); -} - void i915_firmware_load_error_print(const char *fw_path, int err) { DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3e36af90f943..c3b9fcf301a0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2601,9 +2601,15 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); void i915_firmware_load_error_print(const char *fw_path, int err); +/* intel_hotplug.c */ +void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); +void intel_hpd_init(struct drm_i915_private *dev_priv); +void intel_hpd_init_work(struct drm_i915_private *dev_priv); +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); +enum port intel_hpd_pin_to_port(enum hpd_pin pin); + /* i915_irq.c */ void i915_queue_hangcheck(struct drm_device *dev); __printf(3, 4) @@ -2611,7 +2617,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); -extern void intel_hpd_init(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ce9eef5f722c..2d3b2ccf9ce4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -807,125 +807,6 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, &crtc->hwmode); } -static bool intel_hpd_irq_event(struct drm_device *dev, - struct drm_connector *connector) -{ - enum drm_connector_status old_status; - - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - old_status = connector->status; - - connector->status = connector->funcs->detect(connector, false); - if (old_status == connector->status) - return false; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", - connector->base.id, - connector->name, - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->status)); - - return true; -} - -static void i915_digport_work_func(struct 
work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.dig_port_work); - u32 long_port_mask, short_port_mask; - struct intel_digital_port *intel_dig_port; - int i; - u32 old_bits = 0; - - spin_lock_irq(&dev_priv->irq_lock); - long_port_mask = dev_priv->hotplug.long_port_mask; - dev_priv->hotplug.long_port_mask = 0; - short_port_mask = dev_priv->hotplug.short_port_mask; - dev_priv->hotplug.short_port_mask = 0; - spin_unlock_irq(&dev_priv->irq_lock); - - for (i = 0; i < I915_MAX_PORTS; i++) { - bool valid = false; - bool long_hpd = false; - intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port || !intel_dig_port->hpd_pulse) - continue; - - if (long_port_mask & (1 << i)) { - valid = true; - long_hpd = true; - } else if (short_port_mask & (1 << i)) - valid = true; - - if (valid) { - enum irqreturn ret; - - ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); - if (ret == IRQ_NONE) { - /* fall back to old school hpd */ - old_bits |= (1 << intel_dig_port->base.hpd_pin); - } - } - } - - if (old_bits) { - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.event_bits |= old_bits; - spin_unlock_irq(&dev_priv->irq_lock); - schedule_work(&dev_priv->hotplug.hotplug_work); - } -} - -/* - * Handle hotplug events outside the interrupt handler proper. - */ -static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv); - -static void i915_hotplug_work_func(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.hotplug_work); - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_connector *intel_connector; - struct intel_encoder *intel_encoder; - struct drm_connector *connector; - bool changed = false; - u32 hpd_event_bits; - - mutex_lock(&mode_config->mutex); - DRM_DEBUG_KMS("running encoder hotplug functions\n"); - - spin_lock_irq(&dev_priv->irq_lock); - - hpd_event_bits = dev_priv->hotplug.event_bits; - dev_priv->hotplug.event_bits = 0; - - /* Disable hotplug on connectors that hit an irq storm. */ - intel_hpd_irq_storm_disable(dev_priv); - - spin_unlock_irq(&dev_priv->irq_lock); - - list_for_each_entry(connector, &mode_config->connector_list, head) { - intel_connector = to_intel_connector(connector); - if (!intel_connector->encoder) - continue; - intel_encoder = intel_connector->encoder; - if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { - DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", - connector->name, intel_encoder->hpd_pin); - if (intel_encoder->hot_plug) - intel_encoder->hot_plug(intel_encoder); - if (intel_hpd_irq_event(dev, connector)) - changed = true; - } - } - mutex_unlock(&mode_config->mutex); - - if (changed) - drm_kms_helper_hotplug_event(dev); -} - static void ironlake_rps_change_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1346,94 +1227,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, return ret; } -#define HPD_STORM_DETECT_PERIOD 1000 -#define HPD_STORM_THRESHOLD 5 - -/** - * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin - * @dev_priv: private driver data pointer - * @pin: the pin to gather stats on - * - * Gather stats about HPD irqs from the specified @pin, and detect irq - * storms. Only the pin specific stats and state are changed, the caller is - * responsible for further action. 
- * - * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms, - * otherwise it's considered an irq storm, and the irq state is set to - * @HPD_MARK_DISABLED. - * - * Return true if an irq storm was detected on @pin. - */ -static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, - enum hpd_pin pin) -{ - unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; - unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); - bool storm = false; - - if (!time_in_range(jiffies, start, end)) { - dev_priv->hotplug.stats[pin].last_jiffies = jiffies; - dev_priv->hotplug.stats[pin].count = 0; - DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); - } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { - dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; - DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); - storm = true; - } else { - dev_priv->hotplug.stats[pin].count++; - DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, - dev_priv->hotplug.stats[pin].count); - } - - return storm; -} - -#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) - -static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_connector *intel_connector; - struct intel_encoder *intel_encoder; - struct drm_connector *connector; - enum hpd_pin pin; - bool hpd_disabled = false; - - assert_spin_locked(&dev_priv->irq_lock); - - list_for_each_entry(connector, &mode_config->connector_list, head) { - if (connector->polled != DRM_CONNECTOR_POLL_HPD) - continue; - - intel_connector = to_intel_connector(connector); - intel_encoder = intel_connector->encoder; - if (!intel_encoder) - continue; - - pin = intel_encoder->hpd_pin; - if (pin == HPD_NONE || - dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) - continue; - - DRM_INFO("HPD interrupt storm detected on connector %s: " - "switching from hotplug detection to polling\n", - connector->name); - - dev_priv->hotplug.stats[pin].state = HPD_DISABLED; - connector->polled = DRM_CONNECTOR_POLL_CONNECT - | DRM_CONNECTOR_POLL_DISCONNECT; - hpd_disabled = true; - } - - /* Enable polling and queue hotplug re-enabling. */ - if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); - mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, - msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); - } -} - static bool pch_port_hotplug_long_detect(enum port port, u32 val) { switch (port) { @@ -1462,20 +1255,6 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) } } -static enum port get_port_from_pin(enum hpd_pin pin) -{ - switch (pin) { - case HPD_PORT_B: - return PORT_B; - case HPD_PORT_C: - return PORT_C; - case HPD_PORT_D: - return PORT_D; - default: - return PORT_A; /* no hpd */ - } -} - /* Get a bit mask of pins that have triggered, and which ones may be long. 
*/ static void pch_get_hpd_pins(u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS]) @@ -1492,7 +1271,7 @@ static void pch_get_hpd_pins(u32 *pin_mask, u32 *long_mask, if (hpd[i] & hotplug_trigger) { *pin_mask |= BIT(i); - if (pch_port_hotplug_long_detect(get_port_from_pin(i), dig_hotplug_reg)) + if (pch_port_hotplug_long_detect(intel_hpd_pin_to_port(i), dig_hotplug_reg)) *long_mask |= BIT(i); } } @@ -1518,7 +1297,7 @@ static void i9xx_get_hpd_pins(u32 *pin_mask, u32 *long_mask, if (hpd[i] & hotplug_trigger) { *pin_mask |= BIT(i); - if (i9xx_port_hotplug_long_detect(get_port_from_pin(i), hotplug_trigger)) + if (i9xx_port_hotplug_long_detect(intel_hpd_pin_to_port(i), hotplug_trigger)) *long_mask |= BIT(i); } } @@ -1527,104 +1306,6 @@ static void i9xx_get_hpd_pins(u32 *pin_mask, u32 *long_mask, hotplug_trigger, *pin_mask); } -/** - * intel_hpd_irq_handler - main hotplug irq handler - * @dev: drm device - * @pin_mask: a mask of hpd pins that have triggered the irq - * @long_mask: a mask of hpd pins that may be long hpd pulses - * - * This is the main hotplug irq handler for all platforms. The platform specific - * irq handlers call the platform specific hotplug irq handlers, which read and - * decode the appropriate registers into bitmasks about hpd pins that have - * triggered (@pin_mask), and which of those pins may be long pulses - * (@long_mask). The @long_mask is ignored if the port corresponding to the pin - * is not a digital port. - * - * Here, we do hotplug irq storm detection and mitigation, and pass further - * processing to appropriate bottom halves. - */ -static void intel_hpd_irq_handler(struct drm_device *dev, - u32 pin_mask, u32 long_mask) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - enum port port; - bool storm_detected = false; - bool queue_dig = false, queue_hp = false; - bool is_dig_port; - - if (!pin_mask) - return; - - spin_lock(&dev_priv->irq_lock); - for_each_hpd_pin(i) { - if (!(BIT(i) & pin_mask)) - continue; - - port = get_port_from_pin(i); - is_dig_port = port && dev_priv->hotplug.irq_port[port]; - - if (is_dig_port) { - bool long_hpd = long_mask & BIT(i); - - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), - long_hpd ? "long" : "short"); - /* - * For long HPD pulses we want to have the digital queue happen, - * but we still want HPD storm detection to function. - */ - queue_dig = true; - if (long_hpd) { - dev_priv->hotplug.long_port_mask |= (1 << port); - } else { - /* for short HPD just trigger the digital queue */ - dev_priv->hotplug.short_port_mask |= (1 << port); - continue; - } - } - - if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { - /* - * On GMCH platforms the interrupt mask bits only - * prevent irq generation, not the setting of the - * hotplug bits itself. So only WARN about unexpected - * interrupts on saner platforms. - */ - WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), - "Received HPD interrupt on pin %d although disabled\n", i); - continue; - } - - if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) - continue; - - if (!is_dig_port) { - dev_priv->hotplug.event_bits |= BIT(i); - queue_hp = true; - } - - if (intel_hpd_irq_storm_detect(dev_priv, i)) { - dev_priv->hotplug.event_bits &= ~BIT(i); - storm_detected = true; - } - } - - if (storm_detected) - dev_priv->display.hpd_irq_setup(dev); - spin_unlock(&dev_priv->irq_lock); - - /* - * Our hotplug handler can grab modeset locks (by calling down into the - * fb helpers). 
Hence it must not be run on our own dev-priv->wq work - * queue for otherwise the flush_work in the pageflip code will - * deadlock. - */ - if (queue_dig) - queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); - if (queue_hp) - schedule_work(&dev_priv->hotplug.hotplug_work); -} - static void gmbus_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4367,46 +4048,6 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), - hotplug.reenable_work.work); - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - int i; - - intel_runtime_pm_get(dev_priv); - - spin_lock_irq(&dev_priv->irq_lock); - for_each_hpd_pin(i) { - struct drm_connector *connector; - - if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) - continue; - - dev_priv->hotplug.stats[i].state = HPD_ENABLED; - - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - if (intel_connector->encoder->hpd_pin == i) { - if (connector->polled != intel_connector->polled) - DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", - connector->name); - connector->polled = intel_connector->polled; - if (!connector->polled) - connector->polled = DRM_CONNECTOR_POLL_HPD; - } - } - } - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irq(&dev_priv->irq_lock); - - intel_runtime_pm_put(dev_priv); -} - /** * intel_irq_init - initializes irq support * @dev_priv: i915 device instance @@ -4418,8 +4059,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); - INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); + intel_hpd_init_work(dev_priv); + INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); @@ -4432,8 +4073,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); - INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, - intel_hpd_irq_storm_reenable_work); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); @@ -4519,46 +4158,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) } /** - * intel_hpd_init - initializes and enables hpd support - * @dev_priv: i915 device instance - * - * This function enables the hotplug support. It requires that interrupts have - * already been enabled with intel_irq_init_hw(). From this point on hotplug and - * poll request can run concurrently to other code, so locking rules must be - * obeyed. - * - * This is a separate step from interrupt enabling to simplify the locking rules - * in the driver load and resume code. 
- */ -void intel_hpd_init(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_connector *connector; - int i; - - for_each_hpd_pin(i) { - dev_priv->hotplug.stats[i].count = 0; - dev_priv->hotplug.stats[i].state = HPD_ENABLED; - } - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_connector *intel_connector = to_intel_connector(connector); - connector->polled = intel_connector->polled; - if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) - connector->polled = DRM_CONNECTOR_POLL_HPD; - if (intel_connector->mst_port) - connector->polled = DRM_CONNECTOR_POLL_HPD; - } - - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked checks happy. */ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irq(&dev_priv->irq_lock); -} - -/** * intel_irq_install - enables the hardware interrupt * @dev_priv: i915 device instance * diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c new file mode 100644 index 000000000000..3c53aac71d98 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -0,0 +1,452 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/kernel.h> + +#include <drm/drmP.h> +#include <drm/i915_drm.h> + +#include "i915_drv.h" +#include "intel_drv.h" + +enum port intel_hpd_pin_to_port(enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_B: + return PORT_B; + case HPD_PORT_C: + return PORT_C; + case HPD_PORT_D: + return PORT_D; + default: + return PORT_A; /* no hpd */ + } +} + +#define HPD_STORM_DETECT_PERIOD 1000 +#define HPD_STORM_THRESHOLD 5 +#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) + +/** + * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin + * @dev_priv: private driver data pointer + * @pin: the pin to gather stats on + * + * Gather stats about HPD irqs from the specified @pin, and detect irq + * storms. Only the pin specific stats and state are changed, the caller is + * responsible for further action. 
+ * + * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms, + * otherwise it's considered an irq storm, and the irq state is set to + * @HPD_MARK_DISABLED. + * + * Return true if an irq storm was detected on @pin. + */ +static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, + enum hpd_pin pin) +{ + unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; + unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); + bool storm = false; + + if (!time_in_range(jiffies, start, end)) { + dev_priv->hotplug.stats[pin].last_jiffies = jiffies; + dev_priv->hotplug.stats[pin].count = 0; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); + } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { + dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; + DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); + storm = true; + } else { + dev_priv->hotplug.stats[pin].count++; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, + dev_priv->hotplug.stats[pin].count); + } + + return storm; +} + +static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_connector *intel_connector; + struct intel_encoder *intel_encoder; + struct drm_connector *connector; + enum hpd_pin pin; + bool hpd_disabled = false; + + assert_spin_locked(&dev_priv->irq_lock); + + list_for_each_entry(connector, &mode_config->connector_list, head) { + if (connector->polled != DRM_CONNECTOR_POLL_HPD) + continue; + + intel_connector = to_intel_connector(connector); + intel_encoder = intel_connector->encoder; + if (!intel_encoder) + continue; + + pin = intel_encoder->hpd_pin; + if (pin == HPD_NONE || + dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) + continue; + + DRM_INFO("HPD interrupt storm detected on connector %s: " + "switching from hotplug detection to polling\n", + connector->name); + + dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + connector->polled = DRM_CONNECTOR_POLL_CONNECT + | DRM_CONNECTOR_POLL_DISCONNECT; + hpd_disabled = true; + } + + /* Enable polling and queue hotplug re-enabling. 
*/ + if (hpd_disabled) { + drm_kms_helper_poll_enable(dev); + mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, + msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); + } +} + +static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + hotplug.reenable_work.work); + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + int i; + + intel_runtime_pm_get(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + for_each_hpd_pin(i) { + struct drm_connector *connector; + + if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) + continue; + + dev_priv->hotplug.stats[i].state = HPD_ENABLED; + + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (intel_connector->encoder->hpd_pin == i) { + if (connector->polled != intel_connector->polled) + DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", + connector->name); + connector->polled = intel_connector->polled; + if (!connector->polled) + connector->polled = DRM_CONNECTOR_POLL_HPD; + } + } + } + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irq(&dev_priv->irq_lock); + + intel_runtime_pm_put(dev_priv); +} + +static bool intel_hpd_irq_event(struct drm_device *dev, + struct drm_connector *connector) +{ + enum drm_connector_status old_status; + + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + old_status = connector->status; + + connector->status = connector->funcs->detect(connector, false); + if (old_status == connector->status) + return false; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); + + return true; +} + +static void i915_digport_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, hotplug.dig_port_work); + u32 long_port_mask, short_port_mask; + struct intel_digital_port *intel_dig_port; + int i; + u32 old_bits = 0; + + spin_lock_irq(&dev_priv->irq_lock); + long_port_mask = dev_priv->hotplug.long_port_mask; + dev_priv->hotplug.long_port_mask = 0; + short_port_mask = dev_priv->hotplug.short_port_mask; + dev_priv->hotplug.short_port_mask = 0; + spin_unlock_irq(&dev_priv->irq_lock); + + for (i = 0; i < I915_MAX_PORTS; i++) { + bool valid = false; + bool long_hpd = false; + intel_dig_port = dev_priv->hotplug.irq_port[i]; + if (!intel_dig_port || !intel_dig_port->hpd_pulse) + continue; + + if (long_port_mask & (1 << i)) { + valid = true; + long_hpd = true; + } else if (short_port_mask & (1 << i)) + valid = true; + + if (valid) { + enum irqreturn ret; + + ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); + if (ret == IRQ_NONE) { + /* fall back to old school hpd */ + old_bits |= (1 << intel_dig_port->base.hpd_pin); + } + } + } + + if (old_bits) { + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->hotplug.event_bits |= old_bits; + spin_unlock_irq(&dev_priv->irq_lock); + schedule_work(&dev_priv->hotplug.hotplug_work); + } +} + +/* + * Handle hotplug events outside the interrupt handler proper. 
+ */ +static void i915_hotplug_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, hotplug.hotplug_work); + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_connector *intel_connector; + struct intel_encoder *intel_encoder; + struct drm_connector *connector; + bool changed = false; + u32 hpd_event_bits; + + mutex_lock(&mode_config->mutex); + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + + spin_lock_irq(&dev_priv->irq_lock); + + hpd_event_bits = dev_priv->hotplug.event_bits; + dev_priv->hotplug.event_bits = 0; + + /* Disable hotplug on connectors that hit an irq storm. */ + intel_hpd_irq_storm_disable(dev_priv); + + spin_unlock_irq(&dev_priv->irq_lock); + + list_for_each_entry(connector, &mode_config->connector_list, head) { + intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; + intel_encoder = intel_connector->encoder; + if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { + DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", + connector->name, intel_encoder->hpd_pin); + if (intel_encoder->hot_plug) + intel_encoder->hot_plug(intel_encoder); + if (intel_hpd_irq_event(dev, connector)) + changed = true; + } + } + mutex_unlock(&mode_config->mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + + +/** + * intel_hpd_irq_handler - main hotplug irq handler + * @dev: drm device + * @pin_mask: a mask of hpd pins that have triggered the irq + * @long_mask: a mask of hpd pins that may be long hpd pulses + * + * This is the main hotplug irq handler for all platforms. The platform specific + * irq handlers call the platform specific hotplug irq handlers, which read and + * decode the appropriate registers into bitmasks about hpd pins that have + * triggered (@pin_mask), and which of those pins may be long pulses + * (@long_mask). The @long_mask is ignored if the port corresponding to the pin + * is not a digital port. + * + * Here, we do hotplug irq storm detection and mitigation, and pass further + * processing to appropriate bottom halves. + */ +void intel_hpd_irq_handler(struct drm_device *dev, + u32 pin_mask, u32 long_mask) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + enum port port; + bool storm_detected = false; + bool queue_dig = false, queue_hp = false; + bool is_dig_port; + + if (!pin_mask) + return; + + spin_lock(&dev_priv->irq_lock); + for_each_hpd_pin(i) { + if (!(BIT(i) & pin_mask)) + continue; + + port = intel_hpd_pin_to_port(i); + is_dig_port = port && dev_priv->hotplug.irq_port[port]; + + if (is_dig_port) { + bool long_hpd = long_mask & BIT(i); + + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + long_hpd ? "long" : "short"); + /* + * For long HPD pulses we want to have the digital queue happen, + * but we still want HPD storm detection to function. + */ + queue_dig = true; + if (long_hpd) { + dev_priv->hotplug.long_port_mask |= (1 << port); + } else { + /* for short HPD just trigger the digital queue */ + dev_priv->hotplug.short_port_mask |= (1 << port); + continue; + } + } + + if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { + /* + * On GMCH platforms the interrupt mask bits only + * prevent irq generation, not the setting of the + * hotplug bits itself. So only WARN about unexpected + * interrupts on saner platforms. 
+ */ + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), + "Received HPD interrupt on pin %d although disabled\n", i); + continue; + } + + if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) + continue; + + if (!is_dig_port) { + dev_priv->hotplug.event_bits |= BIT(i); + queue_hp = true; + } + + if (intel_hpd_irq_storm_detect(dev_priv, i)) { + dev_priv->hotplug.event_bits &= ~BIT(i); + storm_detected = true; + } + } + + if (storm_detected) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock(&dev_priv->irq_lock); + + /* + * Our hotplug handler can grab modeset locks (by calling down into the + * fb helpers). Hence it must not be run on our own dev-priv->wq work + * queue for otherwise the flush_work in the pageflip code will + * deadlock. + */ + if (queue_dig) + queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); + if (queue_hp) + schedule_work(&dev_priv->hotplug.hotplug_work); +} + +/** + * intel_hpd_init - initializes and enables hpd support + * @dev_priv: i915 device instance + * + * This function enables the hotplug support. It requires that interrupts have + * already been enabled with intel_irq_init_hw(). From this point on hotplug and + * poll request can run concurrently to other code, so locking rules must be + * obeyed. + * + * This is a separate step from interrupt enabling to simplify the locking rules + * in the driver load and resume code. + */ +void intel_hpd_init(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + int i; + + for_each_hpd_pin(i) { + dev_priv->hotplug.stats[i].count = 0; + dev_priv->hotplug.stats[i].state = HPD_ENABLED; + } + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_connector *intel_connector = to_intel_connector(connector); + connector->polled = intel_connector->polled; + if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + connector->polled = DRM_CONNECTOR_POLL_HPD; + if (intel_connector->mst_port) + connector->polled = DRM_CONNECTOR_POLL_HPD; + } + + /* + * Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked checks happy. + */ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irq(&dev_priv->irq_lock); +} + +void intel_hpd_init_work(struct drm_i915_private *dev_priv) +{ + INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); + INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, + intel_hpd_irq_storm_reenable_work); +} + +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + + dev_priv->hotplug.long_port_mask = 0; + dev_priv->hotplug.short_port_mask = 0; + dev_priv->hotplug.event_bits = 0; + + spin_unlock_irq(&dev_priv->irq_lock); + + cancel_work_sync(&dev_priv->hotplug.dig_port_work); + cancel_work_sync(&dev_priv->hotplug.hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); +} |