Diffstat (limited to 'drivers/gpu/drm/display/drm_dp_mst_topology.c'):
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 187
 1 file changed, 101 insertions(+), 86 deletions(-)
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 06c91c5b7f7c..a89f38fd3218 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -171,18 +171,30 @@ static const char *drm_dp_mst_sideband_tx_state_str(int state)
return sideband_reason_str[state];
}
+static inline u8
+drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)
+{
+ int idx = (lct / 2) - 1;
+ int shift = (lct % 2) ? 0 : 4;
+ u8 ufp_num;
+
+ /* mst_primary, its RAD is unset */
+ if (lct == 1)
+ return 0;
+
+ ufp_num = (rad[idx] >> shift) & 0xf;
+
+ return ufp_num;
+}
+
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
- u8 unpacked_rad[16];
+ u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) {
- if (i % 2)
- unpacked_rad[i] = rad[i / 2] >> 4;
- else
- unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
- }
+ for (i = 0; i < lct; i++)
+ unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
/* TODO: Eventually add something to printk so we can format the rad
* like this: 1.2.3
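For reference, the RAD packs one hop per nibble, two hops per byte: the upstream-facing port number at link count total (LCT) n lives in rad[(n / 2) - 1], in the high nibble when n is even and the low nibble when n is odd, while LCT 1 is the MST primary and has no entry. A minimal standalone sketch of that unpacking, mirroring the helper added above (not part of the patch; the example names are made up):

#include <stdio.h>

/* Same nibble layout as drm_dp_mst_get_ufp_num_at_lct_from_rad() above. */
static unsigned char ufp_num_at_lct(unsigned char lct, const unsigned char *rad)
{
	if (lct == 1)		/* the MST primary has no RAD entry */
		return 0;
	return (rad[(lct / 2) - 1] >> ((lct % 2) ? 0 : 4)) & 0xf;
}

int main(void)
{
	/* a device four links deep, reached through ports 1, 2 and 3 */
	const unsigned char rad[2] = { 0x12, 0x30 };

	for (unsigned char lct = 1; lct <= 4; lct++)
		printf("lct %u: port %u\n", lct, ufp_num_at_lct(lct, rad));
	return 0;		/* prints ports 0, 1, 2, 3 */
}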
@@ -2180,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
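The GUID write above relies on the return convention assumed for the newer DPCD helpers: drm_dp_dpcd_write_data()/drm_dp_dpcd_read_data() are expected to return 0 on success and a negative errno on failure, treating short transfers as errors themselves, so callers only test ret < 0 instead of comparing against the requested length. A hedged sketch of a call site under that assumption (the wrapper function itself is hypothetical):

/*
 * Hypothetical illustration only: read the branch device GUID assuming the
 * 0-or-negative-errno convention of drm_dp_dpcd_read_data().
 */
static int example_read_guid(struct drm_dp_aux *aux, u8 buf[UUID_SIZE])
{
	int ret = drm_dp_dpcd_read_data(aux, DP_GUID, buf, UUID_SIZE);

	if (ret < 0)	/* e.g. -EIO, or -EPROTO reported for a short read */
		return ret;

	return 0;
}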
@@ -2544,9 +2552,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
if (!mstb)
goto out;
- for (i = 0; i < lct - 1; i++) {
- int shift = (i % 2) ? 0 : 4;
- int port_num = (rad[i / 2] >> shift) & 0xf;
+ for (i = 1; i < lct; i++) {
+ int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -2733,14 +2740,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3613,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3668,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3686,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3752,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3802,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3872,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3911,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4025,6 +4031,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4055,8 +4077,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finishing topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4138,10 +4164,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4150,16 +4177,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4174,9 +4191,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
@@ -4862,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4953,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6093,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
/* Endpoint decompression with DP-to-DP peer device */
@@ -6138,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6161,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
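Taken together, the endpoint checks above reduce to two capability bits. A hedged sketch using the single-byte helper, under the same assumed return convention (the wrapper function is hypothetical, not part of the patch):

/*
 * Hypothetical illustration of the endpoint test in the hunk above: the sink
 * qualifies for DSC over an MST link only if it can decompress DSC itself
 * and is FEC capable.
 */
static bool example_endpoint_can_dsc(struct drm_dp_aux *aux)
{
	u8 dsc, fec;

	if (drm_dp_dpcd_read_byte(aux, DP_DSC_SUPPORT, &dsc) < 0 ||
	    drm_dp_dpcd_read_byte(aux, DP_FEC_CAPABILITY, &fec) < 0)
		return false;

	return (dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	       (fec & DP_FEC_CAPABLE);
}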