From 6cba3fe433415b2549c909ce72601902c8254a83 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Fri, 23 Apr 2021 14:42:55 -0400 Subject: drm/dp: Add backpointer to drm_device in drm_dp_aux This is something that we've wanted for a while now: the ability to actually look up the respective drm_device for a given drm_dp_aux struct. This will also allow us to transition over to using the drm_dbg_*() helpers for debug message printing, as we'll finally have a drm_device to reference for doing so. Note that there is one limitation with this - because some DP AUX adapters exist as platform devices which are initialized independently of their respective DRM devices, one cannot rely on drm_dp_aux->drm_dev to always be non-NULL until drm_dp_aux_register() has been called. We make sure to point this out in the documentation for struct drm_dp_aux. v3: * Add WARN_ON_ONCE() to drm_dp_aux_register() if drm_dev isn't filled out Signed-off-by: Lyude Paul Acked-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20210423184309.207645-4-lyude@redhat.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/drm_dp_mst_topology.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 159014455fab..276f7f054d62 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2350,6 +2350,7 @@ drm_dp_mst_add_port(struct drm_device *dev, port->aux.is_remote = true; /* initialize the MST downstream port's AUX crc work queue */ + port->aux.drm_dev = dev; drm_dp_remote_aux_init(&port->aux); /* -- cgit v1.2.3 From c869c5f8ceca43d61db2176f88a0321f4ce50961 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Fri, 23 Apr 2021 14:43:05 -0400 Subject: drm/dp_mst: Pass drm_dp_mst_topology_mgr to drm_dp_get_vc_payload_bw() Since this is one of the few functions in drm_dp_mst_topology.c that doesn't have any way of getting access to a drm_device, let's pass the drm_dp_mst_topology_mgr down to this function so that it can use drm_dbg_kms(). Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20210423184309.207645-14-lyude@redhat.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/drm_dp_mst_topology.c | 7 +++++-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 3 ++- include/drm/drm_dp_mst_helper.h | 3 ++- 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 276f7f054d62..9bac5bd050ab 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3638,6 +3638,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, /** * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link + * @mgr: The &drm_dp_mst_topology_mgr to use * @link_rate: link rate in 10kbits/s units * @link_lane_count: lane count * @@ -3646,7 +3647,8 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, * convert the number of PBNs required for a given stream to the number of * timeslots this stream requires in each MTP. 
*/ -int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count) +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count) { if (link_rate == 0 || link_lane_count == 0) DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n", @@ -3711,7 +3713,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms goto out_unlock; } - mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), + mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr, + drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); if (mgr->pbn_div == 0) { ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 2daa3f67791e..860381d68d9d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -70,7 +70,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, connector->port, crtc_state->pbn, - drm_dp_get_vc_payload_bw(crtc_state->port_clock, + drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, + crtc_state->port_clock, crtc_state->lane_count)); if (slots == -EDEADLK) return slots; diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index bd1c39907b92..20dc705642bd 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -783,7 +783,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); -- cgit v1.2.3 From 3a08f53dcf17e7cbdc6143e88791b3a10d0fdd48 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Fri, 23 Apr 2021 14:43:09 -0400 Subject: drm/dp_mst: Convert drm_dp_mst_topology.c to drm_err()/drm_dbg*() And finally, convert all of the code in drm_dp_mst_topology.c over to using drm_err() and drm_dbg*(). Note that it would have been considerably more complicated to write this refactor as a coccinelle script, so the whole conversion was done by hand.
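For reference, every conversion in this patch follows the same pattern: a raw DRM_ERROR()/DRM_DEBUG_*() call gains a struct drm_device argument taken from whatever topology manager, branch device or port is already in scope. A minimal before/after sketch, reusing a message that appears in the diff below (the surrounding function is omitted):

	/* before: the message cannot be tied back to a specific DRM device */
	DRM_DEBUG_KMS("failed to read DPCD\n");

	/* after: the same message, now printed against the owning device */
	drm_dbg_kms(mgr->dev, "failed to read DPCD\n");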
v2: * Fix line-wrapping in drm_dp_mst_atomic_check_mstb_bw_limit() Signed-off-by: Lyude Paul Cc: Robert Foss Reviewed-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20210423184309.207645-18-lyude@redhat.com Reviewed-by: Dave Airlie --- drivers/gpu/drm/drm_dp_mst_topology.c | 368 +++++++++++++++++----------------- 1 file changed, 187 insertions(+), 181 deletions(-) (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9bac5bd050ab..5539a91b4031 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -286,7 +286,8 @@ static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, *len = idx; } -static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, +static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_hdr *hdr, u8 *buf, int buflen, u8 *hdrlen) { u8 crc4; @@ -303,7 +304,7 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { - DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); + drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); return false; } @@ -789,7 +790,8 @@ static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg, return true; } -static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, +static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_reply_body *repmsg) { int idx = 1; @@ -1014,7 +1016,8 @@ drm_dp_sideband_parse_query_stream_enc_status( return true; } -static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, +static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_reply_body *msg) { memset(msg, 0, sizeof(*msg)); @@ -1030,7 +1033,7 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, switch (msg->req_type) { case DP_LINK_ADDRESS: - return drm_dp_sideband_parse_link_address(raw, msg); + return drm_dp_sideband_parse_link_address(mgr, raw, msg); case DP_QUERY_PAYLOAD: return drm_dp_sideband_parse_query_payload_ack(raw, msg); case DP_REMOTE_DPCD_READ: @@ -1053,14 +1056,16 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, case DP_QUERY_STREAM_ENC_STATUS: return drm_dp_sideband_parse_query_stream_enc_status(raw, msg); default: - DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type, - drm_dp_mst_req_type_str(msg->req_type)); + drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n", + msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); return false; } } -static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, - struct drm_dp_sideband_msg_req_body *msg) +static bool +drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) { int idx = 1; @@ -1082,12 +1087,14 @@ static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideban idx++; return true; fail_len: - DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); + drm_dbg_kms(mgr->dev, "connection status reply parse 
length fail %d %d\n", + idx, raw->curlen); return false; } -static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, - struct drm_dp_sideband_msg_req_body *msg) +static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) { int idx = 1; @@ -1105,11 +1112,12 @@ static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_ idx++; return true; fail_len: - DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); + drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen); return false; } -static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, +static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_req_body *msg) { memset(msg, 0, sizeof(*msg)); @@ -1117,12 +1125,12 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, switch (msg->req_type) { case DP_CONNECTION_STATUS_NOTIFY: - return drm_dp_sideband_parse_connection_status_notify(raw, msg); + return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg); case DP_RESOURCE_STATUS_NOTIFY: - return drm_dp_sideband_parse_resource_status_notify(raw, msg); + return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg); default: - DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type, - drm_dp_mst_req_type_str(msg->req_type)); + drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n", + msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); return false; } } @@ -1232,14 +1240,14 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); if (ret > mgr->max_payloads) { ret = -EINVAL; - DRM_DEBUG_KMS("out of payload ids %d\n", ret); + drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret); goto out_unlock; } vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); if (vcpi_ret > mgr->max_payloads) { ret = -EINVAL; - DRM_DEBUG_KMS("out of vcpi ids %d\n", ret); + drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret); goto out_unlock; } @@ -1261,7 +1269,7 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, return; mutex_lock(&mgr->payload_lock); - DRM_DEBUG_KMS("putting payload %d\n", vcpi); + drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi); clear_bit(vcpi - 1, &mgr->vcpi_mask); for (i = 0; i < mgr->max_payloads; i++) { @@ -1331,7 +1339,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, goto out; } } else { - DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); + drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n", + txmsg, txmsg->state, txmsg->seqno); /* dump some state */ ret = -EIO; @@ -1485,7 +1494,7 @@ static void drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb) { kref_get(&mstb->malloc_kref); - DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); + drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); } /** @@ -1502,7 +1511,7 @@ drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb) { - DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); + drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); kref_put(&mstb->malloc_kref, 
drm_dp_free_mst_branch_device); } @@ -1536,7 +1545,7 @@ void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port) { kref_get(&port->malloc_kref); - DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref)); + drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref)); } EXPORT_SYMBOL(drm_dp_mst_get_port_malloc); @@ -1553,7 +1562,7 @@ EXPORT_SYMBOL(drm_dp_mst_get_port_malloc); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) { - DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); + drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); kref_put(&port->malloc_kref, drm_dp_free_mst_port); } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); @@ -1778,8 +1787,7 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) topology_ref_history_lock(mstb->mgr); ret = kref_get_unless_zero(&mstb->topology_kref); if (ret) { - DRM_DEBUG("mstb %p (%d)\n", - mstb, kref_read(&mstb->topology_kref)); + drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); } @@ -1809,7 +1817,7 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); - DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); topology_ref_history_unlock(mstb->mgr); } @@ -1831,8 +1839,7 @@ drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { topology_ref_history_lock(mstb->mgr); - DRM_DEBUG("mstb %p (%d)\n", - mstb, kref_read(&mstb->topology_kref) - 1); + drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); topology_ref_history_unlock(mstb->mgr); @@ -1895,8 +1902,7 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) topology_ref_history_lock(port->mgr); ret = kref_get_unless_zero(&port->topology_kref); if (ret) { - DRM_DEBUG("port %p (%d)\n", - port, kref_read(&port->topology_kref)); + drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); } @@ -1923,7 +1929,7 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); - DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); topology_ref_history_unlock(port->mgr); @@ -1944,8 +1950,7 @@ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { topology_ref_history_lock(port->mgr); - DRM_DEBUG("port %p (%d)\n", - port, kref_read(&port->topology_kref) - 1); + drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); topology_ref_history_unlock(port->mgr); @@ -2130,8 +2135,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, mstb = drm_dp_add_mst_branch_device(lct, rad); if (!mstb) { ret = -ENOMEM; - DRM_ERROR("Failed to create MSTB for port %p", - port); + drm_err(mgr->dev, "Failed to create MSTB for port %p", port); goto out; } @@ -2261,8 +2265,8 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 
int drm_dp_mst_connector_late_register(struct drm_connector *connector, struct drm_dp_mst_port *port) { - DRM_DEBUG_KMS("registering %s remote bus for %s\n", - port->aux.name, connector->kdev->kobj.name); + drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", + port->aux.name, connector->kdev->kobj.name); port->aux.dev = connector->kdev; return drm_dp_aux_register_devnode(&port->aux); @@ -2281,8 +2285,8 @@ EXPORT_SYMBOL(drm_dp_mst_connector_late_register); void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, struct drm_dp_mst_port *port) { - DRM_DEBUG_KMS("unregistering %s remote bus for %s\n", - port->aux.name, connector->kdev->kobj.name); + drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", + port->aux.name, connector->kdev->kobj.name); drm_dp_aux_unregister_devnode(&port->aux); } EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); @@ -2312,7 +2316,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, return; error: - DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); + drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); } /* @@ -2452,8 +2456,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, if (ret == 1) { send_link_addr = true; } else if (ret < 0) { - DRM_ERROR("Failed to change PDT on port %p: %d\n", - port, ret); + drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret); goto fail; } @@ -2548,8 +2551,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, if (ret == 1) { dowork = true; } else if (ret < 0) { - DRM_ERROR("Failed to change PDT for port %p: %d\n", - port, ret); + drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret); dowork = false; } @@ -2608,7 +2610,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ if (port->port_num == port_num) { mstb = port->mstb; if (!mstb) { - DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); + drm_err(mgr->dev, + "failed to lookup MSTB with lct %d, rad %02x\n", + lct, rad[0]); goto out; } @@ -2744,7 +2748,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) * things work again. 
*/ if (clear_payload_id_table) { - DRM_DEBUG_KMS("Clearing payload ID table\n"); + drm_dbg_kms(dev, "Clearing payload ID table\n"); drm_dp_send_clear_payload_id_table(mgr, mstb); } @@ -2806,7 +2810,7 @@ retry: retries++; goto retry; } - DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); + drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); return -EIO; } @@ -2919,7 +2923,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_sideband_msg_tx, next); ret = process_single_tx_qlock(mgr, txmsg, false); if (ret < 0) { - DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); list_del(&txmsg->next); txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; wake_up_all(&mgr->tx_waitq); @@ -2944,24 +2948,26 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, } static void -drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) +drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_link_address_ack_reply *reply) { struct drm_dp_link_addr_reply_port *port_reply; int i; for (i = 0; i < reply->nports; i++) { port_reply = &reply->ports[i]; - DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", - i, - port_reply->input_port, - port_reply->peer_device_type, - port_reply->port_number, - port_reply->dpcd_revision, - port_reply->mcs, - port_reply->ddps, - port_reply->legacy_device_plug_status, - port_reply->num_sdp_streams, - port_reply->num_sdp_stream_sinks); + drm_dbg_kms(mgr->dev, + "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", + i, + port_reply->input_port, + port_reply->peer_device_type, + port_reply->port_number, + port_reply->dpcd_revision, + port_reply->mcs, + port_reply->ddps, + port_reply->legacy_device_plug_status, + port_reply->num_sdp_streams, + port_reply->num_sdp_stream_sinks); } } @@ -2987,26 +2993,25 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, /* FIXME: Actually do some real error handling here */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret <= 0) { - DRM_ERROR("Sending link address failed with %d\n", ret); + drm_err(mgr->dev, "Sending link address failed with %d\n", ret); goto out; } if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { - DRM_ERROR("link address NAK received\n"); + drm_err(mgr->dev, "link address NAK received\n"); ret = -EIO; goto out; } reply = &txmsg->reply.u.link_addr; - DRM_DEBUG_KMS("link address reply: %d\n", reply->nports); - drm_dp_dump_link_address(reply); + drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); + drm_dp_dump_link_address(mgr, reply); ret = drm_dp_check_mstb_guid(mstb, reply->guid); if (ret) { char buf[64]; drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); - DRM_ERROR("GUID check on %s failed: %d\n", - buf, ret); + drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); goto out; } @@ -3030,8 +3035,8 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, if (port_mask & BIT(port->port_num)) continue; - DRM_DEBUG_KMS("port %d was not in link address, removing\n", - port->port_num); + drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", + port->port_num); list_del(&port->next); drm_dp_mst_topology_put_port(port); changed = true; @@ -3063,7 +3068,7 @@ drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if 
(ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) - DRM_DEBUG_KMS("clear payload table id nak received\n"); + drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); kfree(txmsg); } @@ -3092,15 +3097,15 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res = &txmsg->reply.u.path_resources; if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { - DRM_DEBUG_KMS("enum path resources nak received\n"); + drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); } else { if (port->port_num != path_res->port_number) DRM_ERROR("got incorrect port in response\n"); - DRM_DEBUG_KMS("enum path resources %d: %d %d\n", - path_res->port_number, - path_res->full_payload_bw_number, - path_res->avail_payload_bw_number); + drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", + path_res->port_number, + path_res->full_payload_bw_number, + path_res->avail_payload_bw_number); /* * If something changed, make sure we send a @@ -3346,7 +3351,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, int id, struct drm_dp_payload *payload) { - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(mgr->dev, "\n"); /* it's okay for these to fail */ if (port) { drm_dp_payload_send_msg(mgr, port, id, 0); @@ -3452,7 +3457,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) continue; } - DRM_DEBUG_KMS("removing payload %d\n", i); + drm_dbg_kms(mgr->dev, "removing payload %d\n", i); for (j = i; j < mgr->max_payloads - 1; j++) { mgr->payloads[j] = mgr->payloads[j + 1]; mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; @@ -3499,7 +3504,7 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); - DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); + drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state); if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { @@ -3544,8 +3549,8 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, /* DPCD read should never be NACKed */ if (txmsg->reply.reply_type == 1) { - DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", - mstb, port->port_num, offset, size); + drm_err(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", + mstb, port->port_num, offset, size); ret = -EIO; goto fail_free; } @@ -3651,8 +3656,8 @@ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, int link_rate, int link_lane_count) { if (link_rate == 0 || link_lane_count == 0) - DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n", - link_rate, link_lane_count); + drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", + link_rate, link_lane_count); /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ return link_rate * link_lane_count / 54000; @@ -3709,7 +3714,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* get dpcd info */ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); if (ret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("failed to read DPCD\n"); + drm_dbg_kms(mgr->dev, "failed to read DPCD\n"); goto out_unlock; } @@ -3844,7 +3849,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, 
mgr->dpcd, DP_RECEIVER_CAP_SIZE); if (ret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } @@ -3853,20 +3858,20 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); goto out_fail; } /* Some hubs forget their guids after they resume */ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); if (ret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); if (ret) { - DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n"); + drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); goto out_fail; } @@ -3879,7 +3884,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->lock); if (sync) { - DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + drm_dbg_kms(mgr->dev, + "Waiting for link probe work to finish re-syncing topology...\n"); flush_work(&mgr->work); } @@ -3912,15 +3918,15 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, len = min(mgr->max_dpcd_transaction_bytes, 16); ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); + drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); return false; } - ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen); + ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen); if (ret == false) { print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replyblock, len, false); - DRM_DEBUG_KMS("ERROR: failed header\n"); + drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); return false; } @@ -3928,22 +3934,20 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, /* Caller is responsible for giving back this reference */ *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad); if (!*mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr.lct); + drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); return false; } } if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) { - DRM_DEBUG_KMS("sideband msg set header failed %d\n", - replyblock[0]); + drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); return false; } replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen); if (!ret) { - DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); + drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); return false; } @@ -3954,14 +3958,14 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", - len, ret); + drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", + len, ret); return false; } ret = drm_dp_sideband_append_payload(msg, replyblock, len); if (!ret) { - DRM_DEBUG_KMS("failed to build sideband msg\n"); + drm_dbg_kms(mgr->dev, "failed to build sideband 
msg\n"); return false; } @@ -3995,21 +3999,21 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_sideband_msg_hdr *hdr; hdr = &msg->initial_hdr; - DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", - mstb, hdr->seqno, hdr->lct, hdr->rad[0], - msg->msg[0]); + drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", + mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); goto out_clear_reply; } - drm_dp_sideband_parse_reply(msg, &txmsg->reply); + drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { - DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", - txmsg->reply.req_type, - drm_dp_mst_req_type_str(txmsg->reply.req_type), - txmsg->reply.u.nak.reason, - drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), - txmsg->reply.u.nak.nak_data); + drm_dbg_kms(mgr->dev, + "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", + txmsg->reply.req_type, + drm_dp_mst_req_type_str(txmsg->reply.req_type), + txmsg->reply.u.nak.reason, + drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), + txmsg->reply.u.nak.nak_data); } memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); @@ -4057,8 +4061,7 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, } if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); + drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); return false; } @@ -4118,12 +4121,12 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) INIT_LIST_HEAD(&up_req->next); - drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); + drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { - DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n", - up_req->msg.req_type); + drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", + up_req->msg.req_type); kfree(up_req); goto out; } @@ -4135,20 +4138,20 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; - DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", - conn_stat->port_number, - conn_stat->legacy_device_plug_status, - conn_stat->displayport_device_plug_status, - conn_stat->message_capability_status, - conn_stat->input_port, - conn_stat->peer_device_type); + drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; - DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", - res_stat->port_number, - res_stat->available_pbn); + drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", + res_stat->port_number, + res_stat->available_pbn); } up_req->hdr = mgr->up_req_recv.initial_hdr; @@ -4388,8 +4391,9 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, * which is an error */ if (WARN_ON(!prev_slots)) { - DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n", - port); + drm_err(mgr->dev, + "cannot allocate and 
release VCPI on [MST PORT:%p] in the same state\n", + port); return -EINVAL; } @@ -4406,12 +4410,12 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, req_slots = DIV_ROUND_UP(pbn, pbn_div); - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", - port->connector->base.id, port->connector->name, - port, prev_slots, req_slots); - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", - port->connector->base.id, port->connector->name, - port, prev_bw, pbn); + drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", + port->connector->base.id, port->connector->name, + port, prev_slots, req_slots); + drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", + port->connector->base.id, port->connector->name, + port, prev_bw, pbn); /* Add the new allocation to the state */ if (!vcpi) { @@ -4475,12 +4479,12 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, } } if (WARN_ON(!found)) { - DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n", - port, &topology_state->base); + drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n", + port, &topology_state->base); return -EINVAL; } - DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); if (pos->vcpi) { drm_dp_mst_put_port_malloc(port); pos->vcpi = 0; @@ -4511,8 +4515,9 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, return false; if (port->vcpi.vcpi > 0) { - DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", - port->vcpi.vcpi, port->vcpi.pbn, pbn); + drm_dbg_kms(mgr->dev, + "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", + port->vcpi.vcpi, port->vcpi.pbn, pbn); if (pbn == port->vcpi.pbn) { drm_dp_mst_topology_put_port(port); return true; @@ -4521,13 +4526,12 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); if (ret) { - DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", - DIV_ROUND_UP(pbn, mgr->pbn_div), ret); + drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n", + DIV_ROUND_UP(pbn, mgr->pbn_div), ret); drm_dp_mst_topology_put_port(port); goto out; } - DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", - pbn, port->vcpi.num_slots); + drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); /* Keep port allocated until its payload has been removed */ drm_dp_mst_get_port_malloc(port); @@ -4609,14 +4613,14 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); if (ret != 3) { - DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); + drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret); goto fail; } retry: ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); if (ret < 0) { - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret); goto fail; } @@ -4626,7 +4630,8 @@ retry: usleep_range(10000, 20000); goto retry; } - DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); + drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n", + status); ret = -EINVAL; goto fail; } @@ -4673,16 +4678,15 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) status & 
DP_PAYLOAD_ACT_HANDLED || status < 0, 200, timeout_ms * USEC_PER_MSEC); if (ret < 0 && status >= 0) { - DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n", - timeout_ms, status); + drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", + timeout_ms, status); return -EINVAL; } else if (status < 0) { /* * Failure here isn't unexpected - the hub may have * just been unplugged */ - DRM_DEBUG_KMS("Failed to read payload table status: %d\n", - status); + drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); return status; } @@ -5122,12 +5126,11 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, return 0; if (mstb->port_parent) - DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", - mstb->port_parent->parent, mstb->port_parent, - mstb); + drm_dbg_atomic(mstb->mgr->dev, + "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", + mstb->port_parent->parent, mstb->port_parent, mstb); else - DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n", - mstb); + drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); list_for_each_entry(port, &mstb->ports, next) { ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); @@ -5185,14 +5188,14 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, } if (pbn_used > port->full_pbn) { - DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", - port->parent, port, pbn_used, - port->full_pbn); + drm_dbg_atomic(port->mgr->dev, + "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", + port->parent, port, pbn_used, port->full_pbn); return -ENOSPC; } - DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", - port->parent, port, pbn_used, port->full_pbn); + drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", + port->parent, port, pbn_used, port->full_pbn); return pbn_used; } @@ -5207,31 +5210,31 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, list_for_each_entry(vcpi, &mst_state->vcpis, next) { /* Releasing VCPI is always OK-even if the port is gone */ if (!vcpi->vcpi) { - DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n", - vcpi->port); + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n", + vcpi->port); continue; } - DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n", - vcpi->port, vcpi->vcpi); + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n", + vcpi->port, vcpi->vcpi); avail_slots -= vcpi->vcpi; if (avail_slots < 0) { - DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", - vcpi->port, mst_state, - avail_slots + vcpi->vcpi); + drm_dbg_atomic(mgr->dev, + "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", + vcpi->port, mst_state, avail_slots + vcpi->vcpi); return -ENOSPC; } if (++payload_count > mgr->max_payloads) { - DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n", - mgr, mst_state, mgr->max_payloads); + drm_dbg_atomic(mgr->dev, + "[MST MGR:%p] state %p has too many payloads (max=%d)\n", + mgr, mst_state, mgr->max_payloads); return -EINVAL; } } - DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", - mgr, mst_state, avail_slots, - 63 - avail_slots); + drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", + mgr, mst_state, avail_slots, 63 - avail_slots); return 0; } @@ -5288,8 +5291,8 @@ int 
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", - mgr, crtc); + drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", + mgr, crtc); crtc_state->mode_changed = true; } @@ -5334,21 +5337,24 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, } if (!found) { - DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", - port, mst_state); + drm_dbg_atomic(state->dev, + "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", + port, mst_state); return -EINVAL; } if (pos->dsc_enabled == enable) { - DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", - port, enable, pos->vcpi); + drm_dbg_atomic(state->dev, + "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", + port, enable, pos->vcpi); vcpi = pos->vcpi; } if (enable) { vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); - DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", - port, vcpi); + drm_dbg_atomic(state->dev, + "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", + port, vcpi); if (vcpi < 0) return -EINVAL; } @@ -5695,7 +5701,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, } else if (remote_i2c_write_ok(msgs, num)) { ret = drm_dp_mst_i2c_write(mstb, port, msgs, num); } else { - DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); + drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n"); ret = -EIO; } -- cgit v1.2.3 From 98025a62cb00778a467dbc359d647c9515d51b4e Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Thu, 29 Apr 2021 18:11:51 -0400 Subject: drm/dp_mst: Use Extended Base Receiver Capability DPCD space [why] DP 1.4a spec mandates that if DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT is set, Extended Base Receiver Capability DPCD space must be used. Without doing that, the three DPCD values that differ will be wrong, leading to incorrect or limited functionality. MST link rate, for example, could have a lower value. Also, the Synaptics quirk wouldn't work correctly when the Extended DPCD was not read, resulting in no DSC for such hubs. [how] Modify MST topology manager to use the values from Extended DPCD where applicable. To prevent regression on the sources that have a lower maximum link rate capability than MAX_LINK_RATE from Extended DPCD, have the drivers supply maximum lane count and rate at initialization time. This also reverts commit 2dcab875e763 ("Revert drm/dp_mst: Retrieve extended DPCD caps for topology manager"), bringing the change back to the original commit ad44c03208e4 ("drm/dp_mst: Retrieve extended DPCD caps for topology manager").
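As a rough sketch of what this asks of driver code, an MST-capable driver now passes its source limits at init time, with the link rate given as a DPCD bandwidth code. Only the two new parameters come from the updated drm_dp_mst_topology_mgr_init() prototype in this patch; the variable names and the other values below are purely illustrative:

	/* hypothetical driver init: clamp MST link settings to what the source
	 * encoder can drive; 0x14 is the DPCD code for HBR2 (5.4 Gbps per lane)
	 */
	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, drm_dev, &priv->aux,
					   16,   /* max DPCD transaction bytes */
					   4,    /* max payloads */
					   4,    /* new: max lane count */
					   0x14, /* new: max link rate, DPCD units */
					   connector_id);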
Signed-off-by: Nikola Cornij Reviewed-by: Lyude Paul Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20210429221151.22020-2-nikola.cornij@amd.com --- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5 ++++ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 18 ++++++++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++ drivers/gpu/drm/drm_dp_mst_topology.c | 33 ++++++++++++++-------- drivers/gpu/drm/i915/display/intel_dp_mst.c | 6 +++- drivers/gpu/drm/nouveau/dispnv50/disp.c | 3 +- drivers/gpu/drm/radeon/radeon_dp_mst.c | 7 +++++ include/drm/drm_dp_mst_helper.h | 12 +++++++- 8 files changed, 71 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 997567f6f0ba..ef8d53e24c47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -429,6 +429,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int link_index) { + struct dc_link_settings max_link_enc_cap = {0}; + aconnector->dm_dp_aux.aux.name = kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", link_index); @@ -443,6 +445,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) return; + dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, @@ -450,6 +453,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, &aconnector->dm_dp_aux.aux, 16, 4, + (u8)max_link_enc_cap.lane_count, + (u8)max_link_enc_cap.link_rate, aconnector->connector_id); drm_connector_attach_dp_subconnector_property(&aconnector->base); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7d2e433c2275..6fe66b7ee53e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1894,6 +1894,24 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) return true; } +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ + if (!max_link_enc_cap) { + DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); + return false; + } + + if (link->link_enc->funcs->get_max_link_cap) { + link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap); + return true; + } + + DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); + max_link_enc_cap->lane_count = 1; + max_link_enc_cap->link_rate = 6; + return false; +} + static struct dc_link_settings get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index b0013e674864..cb6d0543d839 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -346,6 +346,8 @@ bool dc_link_dp_set_test_pattern( const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap); + void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 5539a91b4031..54604633e65c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3708,19 +3708,24 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* set the device into MST mode */ if (mst_state) { struct drm_dp_payload reset_pay; + int lane_count; + int link_rate; WARN_ON(mgr->mst_primary); /* get dpcd info */ - ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (ret != DP_RECEIVER_CAP_SIZE) { - drm_dbg_kms(mgr->dev, "failed to read DPCD\n"); + ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); + if (ret < 0) { + drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", + mgr->aux->name, ret); goto out_unlock; } + lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count); + link_rate = min_t(int, mgr->dpcd[1], mgr->max_link_rate); mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr, - drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), - mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); + drm_dp_bw_code_to_link_rate(link_rate), + lane_count); if (mgr->pbn_div == 0) { ret = -EINVAL; goto out_unlock; @@ -5448,14 +5453,17 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); * @aux: DP helper aux channel to talk to this device * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit * @max_payloads: maximum number of payloads this GPU can source + * @max_lane_count: maximum number of lanes this GPU supports + * @max_link_rate: maximum link rate this GPU supports, units as in DPCD * @conn_base_id: the connector object ID the MST device is connected to. * * Return 0 for success, or negative error code on failure */ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, - int max_dpcd_transaction_bytes, - int max_payloads, int conn_base_id) + int max_dpcd_transaction_bytes, int max_payloads, + u8 max_lane_count, u8 max_link_rate, + int conn_base_id) { struct drm_dp_mst_topology_state *mst_state; @@ -5490,6 +5498,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mgr->aux = aux; mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; mgr->max_payloads = max_payloads; + mgr->max_lane_count = max_lane_count; + mgr->max_link_rate = max_link_rate; mgr->conn_base_id = conn_base_id; if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) @@ -5896,14 +5906,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && port->parent == port->mgr->mst_primary) { - u8 downstreamport; + u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; - if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT, - &downstreamport, 1) < 0) + if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0) return NULL; - if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) && - ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK) + if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) && + ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) != DP_DWN_STRM_PORT_TYPE_ANALOG)) return port->mgr->aux; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 860381d68d9d..bf7f8487945c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c 
@@ -942,6 +942,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) struct intel_dp *intel_dp = &dig_port->dp; enum port port = dig_port->base.port; int ret; + int bios_max_link_rate; if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) return 0; @@ -956,8 +957,11 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) /* create encoders */ intel_dp_create_fake_mst_encoders(dig_port); + bios_max_link_rate = intel_bios_dp_max_link_rate(&dig_port->base); ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, - &intel_dp->aux, 16, 3, conn_base_id); + &intel_dp->aux, 16, 3, + (u8)dig_port->max_lanes, + (u8)(bios_max_link_rate / 27000), conn_base_id); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 1c9c0cdf85db..c46d0374b6e6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1617,7 +1617,8 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, mstm->mgr.cbs = &nv50_mstm; ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max, - max_payloads, conn_base_id); + (u8)max_payloads, outp->dcb->dpconf.link_nr, + (u8)outp->dcb->dpconf.link_bw, conn_base_id); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 59cf1d288465..13072c2a6502 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -629,13 +629,20 @@ int radeon_dp_mst_init(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; + int max_link_rate; if (!radeon_connector->ddc_bus->has_aux) return 0; + if (radeon_connector_is_dp12_capable(&radeon_connector->base)) + max_link_rate = 0x14; + else + max_link_rate = 0x0a; + radeon_connector->mst_mgr.cbs = &mst_cbs; return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev, &radeon_connector->ddc_bus->aux, 16, 6, + 4, (u8)max_link_rate, radeon_connector->base.base.id); } diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 20dc705642bd..c87a829b6498 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -593,6 +593,14 @@ struct drm_dp_mst_topology_mgr { * @max_payloads: maximum number of payloads the GPU can generate. */ int max_payloads; + /** + * @max_lane_count: maximum number of lanes the GPU can drive. + */ + u8 max_lane_count; + /** + * @max_link_rate: maximum link rate per lane GPU can output. + */ + u8 max_link_rate; /** * @conn_base_id: DRM connector ID this mgr is connected to. Only used * to build the MST connector path value. @@ -765,7 +773,9 @@ struct drm_dp_mst_topology_mgr { int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, - int max_payloads, int conn_base_id); + int max_payloads, + u8 max_lane_count, u8 max_link_rate, + int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); -- cgit v1.2.3
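A closing note on the payload-bandwidth math behind the pbn_div value computed above: drm_dp_get_vc_payload_bw() simply returns link_rate * link_lane_count / 54000, with link_rate in 10 kbit/s units. A rough worked example, assuming an HBR2 x4 link (the numbers are illustrative and mgr stands for whatever topology manager is in scope):

	/* HBR2: bw code 0x14, so drm_dp_bw_code_to_link_rate(0x14) == 540000 */
	int pbn_div = drm_dp_get_vc_payload_bw(mgr, 540000, 4);
	/* 540000 * 4 / 54000 == 40 PBN per MTP timeslot; a stream needing
	 * 2520 PBN would then occupy DIV_ROUND_UP(2520, 40) == 63 timeslots
	 */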