author    Dave Airlie <airlied@redhat.com>  2018-02-16 02:36:04 +0300
committer Dave Airlie <airlied@redhat.com>  2018-02-16 02:36:04 +0300
commit    933519a5a269d8460450545adefcb5caec622cac (patch)
tree      7fdbd025aa411dd4dbc2d0ec81095bea3bc9478a
parent    76ea0f334e7fb13226e64ee7de928611f5303faf (diff)
parent    2834d9dfaf0276e197158be6af8e1a1d59e58289 (diff)
Merge tag 'topic/hdcp-2018-02-13' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
Add HDCP support to i915 drm driver.

* tag 'topic/hdcp-2018-02-13' of git://anongit.freedesktop.org/drm/drm-misc: (26 commits)
  drm/i915: fix misalignment in HDCP register def
  drm/i915: Reauthenticate HDCP on failure
  drm/i915: Detect panel's hdcp capability
  drm/i915: Optimize HDCP key load
  drm/i915: Retry HDCP bksv read
  drm/i915: Connector info in HDCP debug msgs
  drm/i915: Stop encryption for repeater with no sink
  drm/i915: Handle failure from 2nd stage HDCP auth
  drm/i915: Downgrade hdcp logs from INFO to DEBUG_KMS
  drm/i915: Restore HDCP DRM_INFO when with no downstream
  drm/i915: Check for downstream topology errors
  drm/i915: Start repeater auth on READY/CP_IRQ
  drm/i915: II stage HDCP auth for repeater only
  drm/i915: Extending HDCP for HSW, BDW and BXT+
  drm/i915/dp: Fix compilation of intel_dp_hdcp_check_link
  drm/i915: Only disable HDCP when it's active
  drm/i915: Don't allow HDCP on PORT E/F
  drm/i915: Implement HDCP for DisplayPort
  drm/i915: Implement HDCP for HDMI
  drm/i915: Add function to output Aksv over GMBUS
  ...
Diffstat (limited to 'drivers')

-rw-r--r--  drivers/gpu/drm/drm_atomic.c          |   8
-rw-r--r--  drivers/gpu/drm/drm_connector.c       |  87
-rw-r--r--  drivers/gpu/drm/i915/Makefile         |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h       |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h       |  85
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c   |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c      |  36
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c       | 277
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h      | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c     | 807
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c     | 250
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c      |  81
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c   |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h   |  14

15 files changed, 1742 insertions, 43 deletions
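
The userspace-visible piece of this series is the new "Content Protection" connector property added in drm_connector.c below. As an illustrative aside (not part of the patch), a minimal libdrm sketch of how a client could request HDCP on a connector might look like the following; the helper name and the simplified error handling are assumptions, and only the property lookup/set calls are real libdrm API:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: ask the kernel to protect the link on one connector. */
static int request_content_protection(int fd, uint32_t connector_id)
{
	drmModeObjectPropertiesPtr props;
	drmModePropertyPtr prop;
	uint32_t prop_id = 0;
	uint32_t i;

	/* Look up the "Content Protection" property id on the connector. */
	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		prop = drmModeGetProperty(fd, props->props[i]);
		if (!prop)
			continue;
		if (!strcmp(prop->name, "Content Protection"))
			prop_id = prop->prop_id;
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	if (!prop_id)
		return -1;	/* connector does not expose HDCP */

	/*
	 * Userspace may only request DESIRED (1); the driver flips the
	 * property to ENABLED (2) once the link is authenticated, and
	 * setting ENABLED directly returns -EINVAL (see drm_atomic.c below).
	 */
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, 1 /* DESIRED */);
}

After a successful request, the client is expected to poll the property and treat a transition from ENABLED back to DESIRED as loss of protection, per the property documentation added in drm_connector.c.
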
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d9ad20040a1..46733d534587 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1224,6 +1224,12 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->picture_aspect_ratio = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
+ } else if (property == connector->content_protection_property) {
+ if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
+ return -EINVAL;
+ }
+ state->content_protection = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -1303,6 +1309,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->picture_aspect_ratio;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
+ } else if (property == connector->content_protection_property) {
+ *val = state->content_protection;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index d6a7eba087a0..16b9c3810af2 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -761,6 +761,13 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static struct drm_prop_enum_list drm_cp_enum_list[] = {
+ { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
+ { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
+ { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
+};
+DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+
/**
* DOC: standard connector properties
*
@@ -822,14 +829,50 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* should update this value using drm_mode_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
- * Connector link-status property to indicate the status of link. The default
- * value of link-status is "GOOD". If something fails during or after modeset,
- * the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
- * should update this value using drm_mode_connector_set_link_status_property().
+ * Connector link-status property to indicate the status of link. The
+ * default value of link-status is "GOOD". If something fails during or
+ * after modeset, the kernel driver may set this to "BAD" and issue a
+ * hotplug uevent. Drivers should update this value using
+ * drm_mode_connector_set_link_status_property().
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
* the output device is not rectilinear.
+ * Content Protection:
+ * This property is used by userspace to request the kernel protect future
+ * content communicated over the link. When requested, kernel will apply
+ * the appropriate means of protection (most often HDCP), and use the
+ * property to tell userspace the protection is active.
+ *
+ * Drivers can set this up by calling
+ * drm_connector_attach_content_protection_property() on initialization.
+ *
+ * The value of this property can be one of the following:
+ *
+ * - DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0
+ * The link is not protected, content is transmitted in the clear.
+ * - DRM_MODE_CONTENT_PROTECTION_DESIRED = 1
+ * Userspace has requested content protection, but the link is not
+ * currently protected. When in this state, kernel should enable
+ * Content Protection as soon as possible.
+ * - DRM_MODE_CONTENT_PROTECTION_ENABLED = 2
+ * Userspace has requested content protection, and the link is
+ * protected. Only the driver can set the property to this value.
+ * If userspace attempts to set to ENABLED, kernel will return
+ * -EINVAL.
+ *
+ * A few guidelines:
+ *
+ * - DESIRED state should be preserved until userspace de-asserts it by
+ * setting the property to UNDESIRED. This means ENABLED should only
+ * transition to UNDESIRED when the user explicitly requests it.
+ * - If the state is DESIRED, kernel should attempt to re-authenticate the
+ * link whenever possible. This includes across disable/enable, dpms,
+ * hotplug, downstream device changes, link status failures, etc..
+ * - Userspace is responsible for polling the property to determine when
+ * the value transitions from ENABLED to DESIRED. This signifies the link
+ * is no longer protected and userspace should take appropriate action
+ * (whatever that might be).
*
* Connectors also have one standardized atomic property:
*
@@ -1131,6 +1174,42 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property);
/**
+ * drm_connector_attach_content_protection_property - attach content protection
+ * property
+ *
+ * @connector: connector to attach CP property on.
+ *
+ * This is used to add support for content protection on select connectors.
+ * Content Protection is intentionally vague to allow for different underlying
+ * technologies, however it is most implemented by HDCP.
+ *
+ * The content protection will be set to &drm_connector_state.content_protection
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ prop = drm_property_create_enum(dev, 0, "Content Protection",
+ drm_cp_enum_list,
+ ARRAY_SIZE(drm_cp_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&connector->base, prop,
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ connector->content_protection_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
+
+/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
*
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4d9e2f855e9d..3bddd8a06806 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -109,6 +109,7 @@ i915-y += intel_audio.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
+ intel_hdcp.o \
intel_hotplug.o \
intel_modes.o \
intel_overlay.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 60079a69c2f9..ad1fc845cd1b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3661,6 +3661,7 @@ extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin);
+extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1489dd3b3ec2..e9c79b560823 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3074,6 +3074,7 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
+#define GMBUS_AKSV_SELECT (1<<11)
#define GMBUS_RATE_100KHZ (0<<8)
#define GMBUS_RATE_50KHZ (1<<8)
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
@@ -8165,6 +8166,7 @@ enum {
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
#define SKL_CDCLK_READY_FOR_CHANGE 0x1
@@ -8480,6 +8482,88 @@ enum skl_power_gate {
#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS BIT(7)
+#define HDCP_FUSE_ERROR BIT(6)
+#define HDCP_FUSE_DONE BIT(5)
+#define HDCP_KEY_LOAD_STATUS BIT(1)
+#define HDCP_KEY_LOAD_DONE BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_DDIB_REP_PRESENT BIT(30)
+#define HDCP_DDIA_REP_PRESENT BIT(29)
+#define HDCP_DDIC_REP_PRESENT BIT(28)
+#define HDCP_DDID_REP_PRESENT BIT(27)
+#define HDCP_DDIF_REP_PRESENT BIT(26)
+#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY BIT(16)
+#define HDCP_SHA1_READY BIT(17)
+#define HDCP_SHA1_COMPLETE BIT(18)
+#define HDCP_SHA1_V_MATCH BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + x)
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define HDCP_CONF_CAPTURE_AN BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define HDCP_STATUS_STREAM_A_ENC BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC BIT(28)
+#define HDCP_STATUS_AUTH BIT(21)
+#define HDCP_STATUS_ENC BIT(20)
+#define HDCP_STATUS_RI_MATCH BIT(19)
+#define HDCP_STATUS_R0_READY BIT(18)
+#define HDCP_STATUS_AN_READY BIT(17)
+#define HDCP_STATUS_CIPHER BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff)
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -8511,6 +8595,7 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_HDCP_SIGNALLING (1<<9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 36d4e635e4ce..d452c327dc1d 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
to_intel_digital_connector_state(old_state);
struct drm_crtc_state *crtc_state;
+ intel_hdcp_atomic_check(conn, old_state, new_state);
+
if (!new_state->crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d9b52fe82932..cfcd9cb37d5d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1615,6 +1615,35 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val);
}
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = 0;
+ int ret = 0;
+ uint32_t tmp;
+
+ if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
+ intel_encoder->power_domain)))
+ return -ENXIO;
+
+ if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+ if (enable)
+ tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ else
+ tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+out:
+ intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ return ret;
+}
+
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
@@ -2465,6 +2494,11 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
else
intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+
+ /* Enable hdcp if it's desired */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector));
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -2499,6 +2533,8 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
+
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
else
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 15523f0e3b44..60ba5bb3f34c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15297,6 +15297,10 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
+ if (connector->hdcp_shim) {
+ cancel_delayed_work_sync(&connector->hdcp_check_work);
+ cancel_work_sync(&connector->hdcp_prop_work);
+ }
}
drm_connector_list_iter_end(&conn_iter);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8503d182921b..f10a14330e7c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,7 +36,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -1060,10 +1062,29 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
+static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider,
+ bool aksv_write)
+{
+ uint32_t val = 0;
+
+ if (aksv_write) {
+ send_bytes += 5;
+ val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+ }
+
+ return val | intel_dp->get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider);
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
const uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size)
+ uint8_t *recv, int recv_size, bool aksv_write)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@@ -1123,10 +1144,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
}
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- has_aux_irq,
- send_bytes,
- aux_clock_divider);
+ u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp,
+ has_aux_irq,
+ send_bytes,
+ aux_clock_divider,
+ aksv_write);
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
@@ -1263,7 +1285,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
+ false);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
@@ -1285,7 +1308,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(rxsize > 20))
return -E2BIG;
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
+ false);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
@@ -5025,6 +5049,236 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+ u8 *an)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ uint8_t txbuf[4], rxbuf[2], reply = 0;
+ ssize_t dpcd_ret;
+ int ret;
+
+ /* Output An first, that's easy */
+ dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+ an, DRM_HDCP_AN_LEN);
+ if (dpcd_ret != DRM_HDCP_AN_LEN) {
+ DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+
+ /*
+ * Since Aksv is Oh-So-Secret, we can't access it in software. So in
+ * order to get it on the wire, we need to create the AUX header as if
+ * we were writing the data, and then tickle the hardware to output the
+ * data once the header is sent out.
+ */
+ txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) |
+ ((DP_AUX_HDCP_AKSV >> 16) & 0xf);
+ txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff;
+ txbuf[2] = DP_AUX_HDCP_AKSV & 0xff;
+ txbuf[3] = DRM_HDCP_KSV_LEN - 1;
+
+ ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf,
+ sizeof(rxbuf), true);
+ if (ret < 0) {
+ DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+ return ret;
+ } else if (ret == 0) {
+ DRM_ERROR("Aksv write over DP/AUX was empty\n");
+ return -EIO;
+ }
+
+ reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
+ return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+}
+
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+ u8 *bksv)
+{
+ ssize_t ret;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret != DRM_HDCP_KSV_LEN) {
+ DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus)
+{
+ ssize_t ret;
+ /*
+ * For some reason the HDMI and DP HDCP specs call this register
+ * definition by different names. In the HDMI spec, it's called BSTATUS,
+ * but in DP it's called BINFO.
+ */
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret != DRM_HDCP_BSTATUS_LEN) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+ u8 *bcaps)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ bcaps, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+ u8 *ri_prime)
+{
+ ssize_t ret;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret != DRM_HDCP_RI_LEN) {
+ DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready)
+{
+ ssize_t ret;
+ u8 bstatus;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ *ksv_ready = bstatus & DP_BSTATUS_READY;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ ssize_t ret;
+ int i;
+
+ /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
+ for (i = 0; i < num_downstream; i += 3) {
+ size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_AUX_HDCP_KSV_FIFO,
+ ksv_fifo + i * DRM_HDCP_KSV_LEN,
+ len);
+ if (ret != len) {
+ DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
+ ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part)
+{
+ ssize_t ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_AUX_HDCP_V_PRIME(i), part,
+ DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+ DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+ bool enable)
+{
+ /* Not used for single stream DisplayPort setups */
+ return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+{
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return false;
+ }
+
+ return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+ bool *hdcp_capable)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+ return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .check_link = intel_dp_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+};
+
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
@@ -5190,6 +5444,9 @@ err:
drm_modeset_acquire_fini(&ctx);
WARN(iret, "Acquiring modeset locks failed with %i\n", iret);
+ /* Short pulse can signify loss of hdcp authentication */
+ intel_hdcp_check_link(intel_dp->attached_connector);
+
if (!handled) {
intel_dp->detect_done = false;
goto put_power;
@@ -6179,6 +6436,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_add_properties(intel_dp, connector);
+ if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
+ int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
+ if (ret)
+ DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ }
+
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cdcbeb595bb1..468ec1e90e16 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -41,20 +41,21 @@
#include <drm/drm_atomic.h>
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, US, Wmin, Wmax) ({ \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
int ret__; \
might_sleep(); \
for (;;) { \
bool expired__ = time_after(jiffies, timeout__); \
+ OP; \
if (COND) { \
ret__ = 0; \
break; \
@@ -70,7 +71,9 @@
ret__; \
})
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
@@ -298,6 +301,80 @@ struct intel_panel {
} backlight;
};
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code. What that means is that HDCP over HDMI differs
+ * from HDCP over DP, so to account for these differences, we need to
+ * communicate with the receiver through this shim.
+ *
+ * For completeness, the 2 buses differ in the following ways:
+ * - DP AUX vs. DDC
+ * HDCP registers on the receiver are set via DP AUX for DP, and
+ * they are set via DDC for HDMI.
+ * - Receiver register offsets
+ * The offsets of the registers are different for DP vs. HDMI
+ * - Receiver register masks/offsets
+ * For instance, the ready bit for the KSV fifo is in a different
+ * place on DP vs HDMI
+ * - Receiver register names
+ * Seriously. In the DP spec, the 16-bit register containing
+ * downstream information is called BINFO, on HDMI it's called
+ * BSTATUS. To confuse matters further, DP has a BSTATUS register
+ * with a completely different definition.
+ * - KSV FIFO
+ * On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ * be read 3 keys at a time
+ * - Aksv output
+ * Since Aksv is hidden in hardware, there's different procedures
+ * to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+ /* Outputs the transmitter's An and Aksv values to the receiver. */
+ int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+
+ /* Reads the receiver's key selection vector */
+ int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+
+ /*
+ * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+ * definitions are the same in the respective specs, but the names are
+ * different. Call it BSTATUS since that's the name the HDMI spec
+ * uses and it was there first.
+ */
+ int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus);
+
+ /* Determines whether a repeater is present downstream */
+ int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present);
+
+ /* Reads the receiver's Ri' value */
+ int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+
+ /* Determines if the receiver's KSV FIFO is ready for consumption */
+ int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready);
+
+ /* Reads the ksv fifo for num_downstream devices */
+ int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo);
+
+ /* Reads a 32-bit part of V' from the receiver */
+ int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part);
+
+ /* Enables HDCP signalling on the port */
+ int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+ bool enable);
+
+ /* Ensures the link is still protected */
+ bool (*check_link)(struct intel_digital_port *intel_dig_port);
+
+ /* Detects panel's hdcp capability. This is optional for HDMI. */
+ int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+ bool *hdcp_capable);
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -329,6 +406,12 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
+
+ const struct intel_hdcp_shim *hdcp_shim;
+ struct mutex hdcp_mutex;
+ uint64_t hdcp_value; /* protected by hdcp_mutex */
+ struct delayed_work hdcp_check_work;
+ struct work_struct hdcp_prop_work;
};
struct intel_digital_connector_state {
@@ -1297,6 +1380,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int plane, unsigned int height);
@@ -1757,6 +1842,16 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+/* intel_hdcp.c */
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+int intel_hdcp_check_link(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
new file mode 100644
index 000000000000..14ca5d3057a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_hdcp.h>
+#include <linux/i2c.h>
+#include <linux/random.h>
+
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+#define KEY_LOAD_TRIES 5
+
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ int ret, read_ret;
+ bool ksv_ready;
+
+ /* Poll for ksv list ready (spec says max time allowed is 5s) */
+ ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+ &ksv_ready),
+ read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
+ 100 * 1000);
+ if (ret)
+ return ret;
+ if (read_ret)
+ return read_ret;
+ if (!ksv_ready)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
+ HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+}
+
+static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 val;
+
+ val = I915_READ(HDCP_KEY_STATUS);
+ if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
+ return 0;
+
+ /*
+ * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
+ * out of reset. So if Key is not already loaded, its an error state.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ return -ENXIO;
+
+ /*
+ * Initiate loading the HDCP key from fuses.
+ *
+ * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
+ * differ in the key load trigger process from other platforms.
+ */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = sandybridge_pcode_write(dev_priv,
+ SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ mutex_unlock(&dev_priv->pcu_lock);
+ if (ret) {
+ DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ }
+
+ /* Wait for the keys to load (500us) */
+ ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
+ if (ret)
+ return ret;
+ else if (!(val & HDCP_KEY_LOAD_STATUS))
+ return -ENXIO;
+
+ /* Send Aksv over to PCH display for use in authentication */
+ I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+
+ return 0;
+}
+
+/* Returns updated SHA-1 index */
+static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+{
+ I915_WRITE(HDCP_SHA_TEXT, sha_text);
+ if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+ DRM_ERROR("Timed out waiting for SHA1 ready\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static
+u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+{
+ enum port port = intel_dig_port->base.port;
+ switch (port) {
+ case PORT_A:
+ return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
+ case PORT_B:
+ return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
+ case PORT_C:
+ return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
+ case PORT_D:
+ return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
+ case PORT_E:
+ return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
+}
+
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+ return true;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv;
+ u32 vprime, sha_text, sha_leftovers, rep_ctl;
+ u8 bstatus[2], num_downstream, *ksv_fifo;
+ int ret, i, j, sha_idx;
+
+ dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+ ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+ if (ret) {
+ DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ return ret;
+ }
+
+ ret = shim->read_bstatus(intel_dig_port, bstatus);
+ if (ret)
+ return ret;
+
+ if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+ DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+ DRM_ERROR("Max Topology Limit Exceeded\n");
+ return -EPERM;
+ }
+
+ /*
+ * When repeater reports 0 device count, HDCP1.4 spec allows disabling
+ * the HDCP encryption. That implies that repeater can't have its own
+ * display. As there is no consumption of encrypted content in the
+ * repeater with 0 downstream devices, we are failing the
+ * authentication.
+ */
+ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+ if (num_downstream == 0)
+ return -EINVAL;
+
+ ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!ksv_fifo)
+ return -ENOMEM;
+
+ ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+ if (ret)
+ return ret;
+
+ /* Process V' values from the receiver */
+ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
+ ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+ if (ret)
+ return ret;
+ I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+ }
+
+ /*
+ * We need to write the concatenation of all device KSVs, BINFO (DP) ||
+ * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
+ * stream is written via the HDCP_SHA_TEXT register in 32-bit
+ * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
+ * index will keep track of our progress through the 64 bytes as well as
+ * helping us work the 40-bit KSVs through our 32-bit register.
+ *
+ * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
+ */
+ sha_idx = 0;
+ sha_text = 0;
+ sha_leftovers = 0;
+ rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ for (i = 0; i < num_downstream; i++) {
+ unsigned int sha_empty;
+ u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
+
+ /* Fill up the empty slots in sha_text and write it out */
+ sha_empty = sizeof(sha_text) - sha_leftovers;
+ for (j = 0; j < sha_empty; j++)
+ sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Programming guide writes this every 64 bytes */
+ sha_idx += sizeof(sha_text);
+ if (!(sha_idx % 64))
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+
+ /* Store the leftover bytes from the ksv in sha_text */
+ sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
+ sha_text = 0;
+ for (j = 0; j < sha_leftovers; j++)
+ sha_text |= ksv[sha_empty + j] <<
+ ((sizeof(sha_text) - j - 1) * 8);
+
+ /*
+ * If we still have room in sha_text for more data, continue.
+ * Otherwise, write it out immediately.
+ */
+ if (sizeof(sha_text) > sha_leftovers)
+ continue;
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_leftovers = 0;
+ sha_text = 0;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
+ * bytes are leftover from the last ksv, we might be able to fit them
+ * all in sha_text (first 2 cases), or we might need to split them up
+ * into 2 writes (last 2 cases).
+ */
+ if (sha_leftovers == 0) {
+ /* Write 16 bits of text, 16 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv,
+ bstatus[0] << 8 | bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 16 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 1) {
+ /* Write 24 bits of text, 8 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
+ /* Only 24-bits of data, must be in the LSB */
+ sha_text = (sha_text & 0xffffff00) >> 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 24 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 2) {
+ /* Write 32 bits of text */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 64 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ for (i = 0; i < 2; i++) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+ } else if (sha_leftovers == 3) {
+ /* Write 32 bits of text */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0] << 24;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of text, 24 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ } else {
+ DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
+ return -EINVAL;
+ }
+
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
+ while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * Last write gets the length of the concatenation in bits. That is:
+ * - 5 bytes per device
+ * - 10 bytes for BINFO/BSTATUS(2), M0(8)
+ */
+ sha_text = (num_downstream * 5 + 10) * 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Tell the HW we're done with the hash and wait for it to ACK */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE,
+ HDCP_SHA1_COMPLETE, 1)) {
+ DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ return -ETIMEDOUT;
+ }
+ if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
+ return -ENXIO;
+ }
+
+ DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
+ return 0;
+}
+
+/* Implements Part 1 of the HDCP authorization procedure */
+static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv;
+ enum port port;
+ unsigned long r0_prime_gen_start;
+ int ret, i, tries = 2;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_AN_LEN];
+ } an;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_KSV_LEN];
+ } bksv;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+ bool repeater_present, hdcp_capable;
+
+ dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+ port = intel_dig_port->base.port;
+
+ /*
+ * Detects whether the display is HDCP capable. Although we check for
+ * valid Bksv below, the HDCP over DP spec requires that we check
+ * whether the display supports HDCP before we write An. For HDMI
+ * displays, this is not necessary.
+ */
+ if (shim->hdcp_capable) {
+ ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+ if (ret)
+ return ret;
+ if (!hdcp_capable) {
+ DRM_ERROR("Panel is not HDCP capable\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Initialize An with 2 random values and acquire it */
+ for (i = 0; i < 2; i++)
+ I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
+ I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+
+ /* Wait for An to be acquired */
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_AN_READY,
+ HDCP_STATUS_AN_READY, 1)) {
+ DRM_ERROR("Timed out waiting for An\n");
+ return -ETIMEDOUT;
+ }
+
+ an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
+ an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ ret = shim->write_an_aksv(intel_dig_port, an.shim);
+ if (ret)
+ return ret;
+
+ r0_prime_gen_start = jiffies;
+
+ memset(&bksv, 0, sizeof(bksv));
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(intel_dig_port, bksv.shim);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv.shim))
+ break;
+ }
+ if (i == tries) {
+ DRM_ERROR("HDCP failed, Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
+ I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+
+ ret = shim->repeater_present(intel_dig_port, &repeater_present);
+ if (ret)
+ return ret;
+ if (repeater_present)
+ I915_WRITE(HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(intel_dig_port));
+
+ ret = shim->toggle_signalling(intel_dig_port, true);
+ if (ret)
+ return ret;
+
+ I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+
+ /* Wait for R0 ready */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Timed out waiting for R0 ready\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Wait for R0' to become available. The spec says 100ms from Aksv, but
+ * some monitors can take longer than this. We'll set the timeout at
+ * 300ms just to be sure.
+ *
+ * On DP, there's an R0_READY bit available but no such bit
+ * exists on HDMI. Since the upper-bound is the same, we'll just do
+ * the stupid thing instead of polling on one and not the other.
+ */
+ wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
+
+ ri.reg = 0;
+ ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+ if (ret)
+ return ret;
+ I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for encryption confirmation */
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+ DRM_ERROR("Timed out waiting for encryption\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * XXX: If we have MST-connected devices, we need to enable encryption
+ * on those as well.
+ */
+
+ if (repeater_present)
+ return intel_hdcp_auth_downstream(intel_dig_port, shim);
+
+ DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ return 0;
+}
+
+static
+struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
+static int _intel_hdcp_disable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ I915_WRITE(PORT_HDCP_CONF(port), 0);
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+ 20)) {
+ DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable HDCP signalling\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("HDCP is disabled\n");
+ return 0;
+}
+
+static int _intel_hdcp_enable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ int i, ret, tries = 3;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
+ DRM_ERROR("PG1 is disabled, cannot load keys\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < KEY_LOAD_TRIES; i++) {
+ ret = intel_hdcp_load_keys(dev_priv);
+ if (!ret)
+ break;
+ intel_hdcp_clear_keys(dev_priv);
+ }
+ if (ret) {
+ DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+ return ret;
+ }
+
+ /* Incase of authentication failures, HDCP spec expects reauth. */
+ for (i = 0; i < tries; i++) {
+ ret = intel_hdcp_auth(conn_to_dig_port(connector),
+ connector->hdcp_shim);
+ if (!ret)
+ return 0;
+
+ DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+
+ /* Ensuring HDCP encryption and signalling are stopped. */
+ _intel_hdcp_disable(connector);
+ }
+
+ DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(to_delayed_work(work),
+ struct intel_connector,
+ hdcp_check_work);
+ if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&connector->hdcp_check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static void intel_hdcp_prop_work(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(work,
+ struct intel_connector,
+ hdcp_prop_work);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_connector_state *state;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&connector->hdcp_mutex);
+
+ /*
+ * This worker is only used to flip between ENABLED/DESIRED. Either of
+ * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+ * we're running just after hdcp has been disabled, so just exit
+ */
+ if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ state = connector->base.state;
+ state->content_protection = connector->hdcp_value;
+ }
+
+ mutex_unlock(&connector->hdcp_mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
+{
+ /* PORT E doesn't have HDCP, and PORT F is disabled */
+ return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+}
+
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim)
+{
+ int ret;
+
+ ret = drm_connector_attach_content_protection_property(
+ &connector->base);
+ if (ret)
+ return ret;
+
+ connector->hdcp_shim = hdcp_shim;
+ mutex_init(&connector->hdcp_mutex);
+ INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
+ INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+ return 0;
+}
+
+int intel_hdcp_enable(struct intel_connector *connector)
+{
+ int ret;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret)
+ goto out;
+
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&connector->hdcp_prop_work);
+ schedule_delayed_work(&connector->hdcp_check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+out:
+ mutex_unlock(&connector->hdcp_mutex);
+ return ret;
+}
+
+int intel_hdcp_disable(struct intel_connector *connector)
+{
+ int ret = 0;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ ret = _intel_hdcp_disable(connector);
+ }
+
+ mutex_unlock(&connector->hdcp_mutex);
+ cancel_delayed_work_sync(&connector->hdcp_check_work);
+ return ret;
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ uint64_t old_cp = old_state->content_protection;
+ uint64_t new_cp = new_state->content_protection;
+ struct drm_crtc_state *crtc_state;
+
+ if (!new_state->crtc) {
+ /*
+ * If the connector is being disabled with CP enabled, mark it
+ * desired so it's re-enabled when the connector is brought back
+ */
+ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return;
+ }
+
+ /*
+ * Nothing to do if the state didn't change, or HDCP was activated since
+ * the last commit
+ */
+ if (old_cp == new_cp ||
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ crtc_state->mode_changed = true;
+}
+
+/* Implements Part 3 of the HDCP authorization procedure */
+int intel_hdcp_check_link(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret = 0;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
+ DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
+ connector->base.name, connector->base.base.id,
+ I915_READ(PORT_HDCP_STATUS(port)));
+ ret = -ENXIO;
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+ if (connector->hdcp_shim->check_link(intel_dig_port)) {
+ if (connector->hdcp_value !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ connector->hdcp_value =
+ DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&connector->hdcp_prop_work);
+ }
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&connector->hdcp_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 23150f598dfa..f5d7bfb43006 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -876,6 +877,248 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
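+/*
+ * HDCP messages travel over the sink's DDC channel: each read writes the
+ * one-byte register offset first and then reads the payload back from the
+ * HDCP slave address.
+ */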
+static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+ u8 start = offset & 0xff;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buffer
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs))
+ return 0;
+ return ret >= 0 ? -EIO : ret;
+}
+
+static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+ u8 *write_buf;
+ struct i2c_msg msg;
+
+ write_buf = kzalloc(size + 1, GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ write_buf[0] = offset & 0xff;
+ memcpy(&write_buf[1], buffer, size);
+
+	msg.addr = DRM_HDCP_DDC_ADDR;
+	msg.flags = 0;
+	msg.len = size + 1;
+	msg.buf = write_buf;
+
+	ret = i2c_transfer(adapter, &msg, 1);
+	if (ret == 1)
+		ret = 0;
+	else if (ret >= 0)
+		ret = -EIO;
+
+	/* Don't leak the bounce buffer that carried the offset + payload. */
+	kfree(write_buf);
+	return ret;
+}
+
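+/*
+ * An goes to the sink as a normal DDC write, but Aksv is supplied by the
+ * GMBUS hardware itself rather than by software (see
+ * intel_gmbus_output_aksv()).
+ */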
+static
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+ u8 *an)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+
+ ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+ DRM_HDCP_AN_LEN);
+ if (ret) {
+ DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = intel_gmbus_output_aksv(adapter);
+ if (ret < 0) {
+ DRM_ERROR("Failed to output aksv (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+ u8 *bksv)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret)
+ DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret)
+ DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present)
+{
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+ u8 *ri_prime)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret)
+ DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready)
+{
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+ ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
+ if (ret) {
+ DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part)
+{
+ int ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+ part, DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret)
+ DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+ bool enable)
+{
+ int ret;
+
+ if (!enable)
+ usleep_range(6, 60); /* Bspec says >= 6us */
+
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+ if (ret) {
+ DRM_ERROR("%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
+ return ret;
+ }
+ return 0;
+}
+
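+/*
+ * Link check: read Ri' from the sink and hand it to the port's HDCP
+ * hardware, which compares it against its own Ri and signals the result
+ * via HDCP_STATUS_RI_MATCH in PORT_HDCP_STATUS.
+ */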
+static
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ enum port port = intel_dig_port->base.port;
+ int ret;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+
+ ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+ if (ret)
+ return false;
+
+ I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
+ return false;
+ }
+ return true;
+}
+
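+/* HDCP shim for HDMI connectors: all sink accesses go over DDC/GMBUS. */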
+static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
+ .write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
+ .read_bksv = intel_hdmi_hdcp_read_bksv,
+ .read_bstatus = intel_hdmi_hdcp_read_bstatus,
+ .repeater_present = intel_hdmi_hdcp_repeater_present,
+ .read_ri_prime = intel_hdmi_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
+ .check_link = intel_hdmi_hdcp_check_link,
+};
+
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -2097,6 +2340,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
+ if (is_hdcp_supported(dev_priv, port)) {
+ int ret = intel_hdcp_init(intel_connector,
+ &intel_hdmi_hdcp_shim);
+ if (ret)
+ DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ }
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_hdmi->attached_connector = intel_connector;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index ad1b1a345f2e..e6875509bcd9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -30,6 +30,7 @@
#include <linux/i2c-algo-bit.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -415,7 +416,8 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
static int
gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
- unsigned short addr, u8 *buf, unsigned int len)
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
unsigned int chunk_size = len;
u32 val, loop;
@@ -428,7 +430,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GMBUS3, val);
I915_WRITE_FW(GMBUS1,
- GMBUS_CYCLE_WAIT |
+ gmbus1_index | GMBUS_CYCLE_WAIT |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
@@ -451,7 +453,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int tx_size = msg->len;
@@ -461,7 +464,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
do {
len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+ gmbus1_index);
if (ret)
return ret;
@@ -473,21 +477,21 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
}
/*
- * The gmbus controller can combine a 1 or 2 byte write with a read that
- * immediately follows it by using an "INDEX" cycle.
+ * The gmbus controller can combine a 1 or 2 byte write with another read/write
+ * that immediately follows it by using an "INDEX" cycle.
*/
static bool
-gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
msgs[i].addr == msgs[i + 1].addr &&
!(msgs[i].flags & I2C_M_RD) &&
(msgs[i].len == 1 || msgs[i].len == 2) &&
- (msgs[i + 1].flags & I2C_M_RD));
+ msgs[i + 1].len > 0);
}
static int
-gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
@@ -504,7 +508,10 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
if (gmbus5)
I915_WRITE_FW(GMBUS5, gmbus5);
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ if (msgs[1].flags & I2C_M_RD)
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ else
+ ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
@@ -514,7 +521,8 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
}
static int
-do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
+ u32 gmbus0_source)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
@@ -531,17 +539,17 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
pch_gmbus_clock_gating(dev_priv, false);
retry:
- I915_WRITE_FW(GMBUS0, bus->reg0);
+ I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
- if (gmbus_is_index_read(msgs, i, num)) {
- ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- inc = 2; /* an index read is two msgs */
+ if (gmbus_is_index_xfer(msgs, i, num)) {
+ ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+ inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+ ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
}
if (!ret)
@@ -656,7 +664,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
if (ret < 0)
bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
} else {
- ret = do_gmbus_xfer(adapter, msgs, num);
+ ret = do_gmbus_xfer(adapter, msgs, num, 0);
if (ret == -EAGAIN)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
@@ -666,6 +674,45 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
return ret;
}
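+/*
+ * Used by the HDCP code to write Aksv to the receiver: the transfer is
+ * issued with GMBUS_AKSV_SELECT so the hardware substitutes the key for
+ * the dummy payload passed in below.
+ */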
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int ret;
+ u8 cmd = DRM_HDCP_DDC_AKSV;
+ u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(cmd),
+ .buf = &cmd,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ /*
+ * In order to output Aksv to the receiver, use an indexed write to
+ * pass the i2c command, and tell GMBUS to use the HW-provided value
+ * instead of sourcing GMBUS3 for the data.
+ */
+ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
+ return ret;
+}
+
static u32 gmbus_func(struct i2c_adapter *adapter)
{
return i2c_bit_algo.functionality(adapter) &
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4075010ac088..164dbb8cfa36 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1767,12 +1767,14 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
}
/**
- * intel_wait_for_register - wait until register matches expected state
+ * __intel_wait_for_register - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
- * @timeout_ms: timeout in millisecond
+ * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
+ * @slow_timeout_ms: slow timeout in milliseconds
+ * @out_value: optional placeholder to hold register value
*
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
@@ -1783,14 +1785,17 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms)
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
{
unsigned fw =
intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ u32 reg_value;
int ret;
might_sleep();
@@ -1800,14 +1805,18 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv,
ret = __intel_wait_for_register_fw(dev_priv,
reg, mask, value,
- 2, 0, NULL);
+ fast_timeout_us, 0, &reg_value);
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irq(&dev_priv->uncore.lock);
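+	/* Fall back to a sleeping poll if the short atomic wait timed out. */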
if (ret)
- ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
- timeout_ms);
+ ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+ (reg_value & mask) == value,
+ slow_timeout_ms * 1000, 10, 1000);
+
+ if (out_value)
+ *out_value = reg_value;
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 9ce079b5dd0d..bed019ef000f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -163,11 +163,23 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value);
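+/* Compatibility wrapper: 2us atomic spin, then sleep-wait up to @timeout_ms. */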
+static inline
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms);
+ unsigned int timeout_ms)
+{
+ return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+ timeout_ms, NULL);
+}
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,