Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  256
1 file changed, 206 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d3fc7e4e85b7..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 		return false;
 	}
 
+	/*
+	 * ignore out-of-order messages or messages that are part of a
+	 * failed transaction
+	 */
+	if (!recv_hdr.somt && !msg->have_somt)
+		return false;
+
 	/* get length contained in this portion */
 	msg->curchunk_len = recv_hdr.msg_len;
 	msg->curchunk_hdrlen = hdrlen;
@@ -737,16 +744,16 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
-	bool ret;
+	unsigned int state;
 
 	/*
 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
 	 * cases we check here are terminal states. For those the barriers
 	 * provided by the wake_up/wait_event pair are enough.
 	 */
-	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
-	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	return ret;
+	state = READ_ONCE(txmsg->state);
+	return (state == DRM_DP_SIDEBAND_TX_RX ||
+		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
 }
 
 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
@@ -855,7 +862,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 	mutex_unlock(&mstb->mgr->qlock);
 
 	if (wake_tx)
-		wake_up(&mstb->mgr->tx_waitq);
+		wake_up_all(&mstb->mgr->tx_waitq);
 
 	kref_put(kref, drm_dp_free_mst_branch_device);
 }
@@ -1510,7 +1517,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 }
 
@@ -2164,7 +2171,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				       replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
 
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
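A note on the check_txmsg_state() and wake_up_all() changes above: the wait condition is evaluated without mgr->qlock held, so the read of txmsg->state must go through READ_ONCE() to keep the compiler from tearing or re-reading it, and wake_up_all() matters because several threads can sleep on the same tx_waitq while a plain wake_up() would wake only one. A minimal sketch of the pattern, not taken from the diff (tx_done(), wait_for_reply() and fail_msg() are invented names):

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <drm/drm_dp_mst_helper.h>

/* Condition helper, called locklessly by wait_event_timeout(). */
static bool tx_done(struct drm_dp_sideband_msg_tx *txmsg)
{
	/* single, non-cacheable read on the lockless side */
	unsigned int state = READ_ONCE(txmsg->state);

	return state == DRM_DP_SIDEBAND_TX_RX ||
	       state == DRM_DP_SIDEBAND_TX_TIMEOUT;
}

/* Waiter: sleeps until a writer stores a terminal state and wakes us. */
static long wait_for_reply(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_sideband_msg_tx *txmsg)
{
	return wait_event_timeout(mgr->tx_waitq, tx_done(txmsg), 4 * HZ);
}

/* Writer: every store to ->state happens under mgr->qlock. */
static void fail_msg(struct drm_dp_mst_topology_mgr *mgr,
		     struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
	mutex_unlock(&mgr->qlock);
	wake_up_all(&mgr->tx_waitq);	/* wake every waiter, not just one */
}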
@@ -2258,7 +2276,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 		mstb->tx_slots[slot] = NULL;
 		mutex_unlock(&mgr->qlock);
 
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 	return ret;
 }
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
@@ -2498,6 +2523,81 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find vcpi slots for
+ * @pbn: bandwidth required for the mode in PBN
+ *
+ * RETURNS:
+ * Total slots in the atomic state assigned for this port or error
+ */
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+				  struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port, int pbn)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+	int req_slots;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (topology_state == NULL)
+		return -ENOMEM;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (port == NULL)
+		return -EINVAL;
+	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
+		      req_slots, topology_state->avail_slots);
+
+	if (req_slots > topology_state->avail_slots) {
+		drm_dp_put_port(port);
+		return -ENOSPC;
+	}
+
+	topology_state->avail_slots -= req_slots;
+	DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+
+	drm_dp_put_port(port);
+	return req_slots;
+}
+EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+
+/**
+ * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @slots: number of vcpi slots to release
+ *
+ * RETURNS:
+ * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
+ * negative error code
+ */
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+				     struct drm_dp_mst_topology_mgr *mgr,
+				     int slots)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (topology_state == NULL)
+		return -ENOMEM;
+
+	/* We cannot rely on port->vcpi.num_slots to update
+	 * topology_state->avail_slots as the port may not exist if the parent
+	 * branch device was unplugged. This should be fixed by tracking
+	 * per-port slot allocation in drm_dp_mst_topology_state instead of
+	 * depending on the caller to tell us how many slots to release.
+	 */
+	topology_state->avail_slots += slots;
+	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
+		      slots, topology_state->avail_slots);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+
+/**
  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
  * @mgr: manager for this port
  * @port: port to allocate a virtual channel for.
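To make the slot arithmetic in drm_dp_atomic_find_vcpi_slots() concrete, a worked example with assumed link parameters: this file's test_calc_pbn_mode() self-test expects drm_dp_calc_pbn_mode(297000, 24), i.e. a 297 MHz pixel clock at 24 bpp, to come out to 1063 PBN. On a 4-lane HBR2 link, where drm_dp_get_vc_payload_bw() gives a pbn_div of 40 (10 per lane), req_slots = DIV_ROUND_UP(1063, 40) = 27, leaving 36 of the initial 63 avail_slots for other streams sharing the topology.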
@@ -2761,16 +2861,15 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 				  char *buf)
 {
-	int ret;
 	int i;
-	for (i = 0; i < 4; i++) {
-		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
-		if (ret != 16)
-			break;
+
+	for (i = 0; i < 64; i += 16) {
+		if (drm_dp_dpcd_read(mgr->aux,
+				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+				     &buf[i], 16) != 16)
+			return false;
 	}
-	if (i == 4)
-		return true;
-	return false;
+	return true;
 }
 
 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
@@ -2834,42 +2933,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 	mutex_lock(&mgr->lock);
 	if (mgr->mst_primary) {
 		u8 buf[64];
-		bool bret;
 		int ret;
+
 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
-		seq_printf(m, "dpcd: ");
-		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
-		seq_printf(m, "faux/mst: ");
-		for (i = 0; i < 2; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
-		seq_printf(m, "mst ctrl: ");
-		for (i = 0; i < 1; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
 
 		/* dump the standard OUI branch header */
 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
-		seq_printf(m, "branch oui: ");
-		for (i = 0; i < 0x3; i++)
-			seq_printf(m, "%02x", buf[i]);
-		seq_printf(m, " devid: ");
+		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
 		for (i = 0x3; i < 0x8 && buf[i]; i++)
 			seq_printf(m, "%c", buf[i]);
-
-		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
-		seq_printf(m, "\n");
-		bret = dump_dp_payload_table(mgr, buf);
-		if (bret == true) {
-			seq_printf(m, "payload table: ");
-			for (i = 0; i < 63; i++)
-				seq_printf(m, "%02x ", buf[i]);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
+			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+		if (dump_dp_payload_table(mgr, buf))
+			seq_printf(m, "payload table: %*ph\n", 63, buf);
 
 	}
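The drm_dp_mst_dump_topology() rework above replaces the open-coded "%02x " loops with the kernel's %*ph printf extension, which hex-dumps a buffer of up to 64 bytes; the field width carries the byte count, and the N suffix drops the separators. A small illustration (show_bytes() and its values are invented):

#include <linux/seq_file.h>
#include <linux/types.h>

static void show_bytes(struct seq_file *m)
{
	u8 buf[2] = { 0x12, 0x1b };

	seq_printf(m, "faux/mst: %*ph\n", 2, buf);	/* "faux/mst: 12 1b" */
	seq_printf(m, "oui: %*phN\n", 2, buf);		/* "oui: 121b" */
}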
@@ -2936,6 +3017,69 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		(*mgr->cbs->hotplug)(mgr);
 }
 
+void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
+{
+	struct drm_dp_mst_topology_mgr *mgr = obj;
+	struct drm_dp_mst_topology_state *new_mst_state;
+
+	if (WARN_ON(!mgr->state))
+		return NULL;
+
+	new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
+	if (new_mst_state)
+		new_mst_state->state = state;
+	return new_mst_state;
+}
+
+void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
+{
+	struct drm_dp_mst_topology_mgr *mgr = obj;
+	struct drm_dp_mst_topology_state **topology_state_ptr;
+
+	topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
+
+	mgr->state->state = (*topology_state_ptr)->state;
+	swap(*topology_state_ptr, mgr->state);
+	mgr->state->state = NULL;
+}
+
+void drm_dp_mst_destroy_state(void *obj_state)
+{
+	kfree(obj_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+	.duplicate_state = drm_dp_mst_duplicate_state,
+	.swap_state = drm_dp_mst_swap_state,
+	.destroy_state = drm_dp_mst_destroy_state,
+};
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state() passing in the MST
+ * atomic state vtable so that the private object state returned is that of an
+ * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
+ * caller to take care of the locking, so we warn if the connection_mutex is
+ * not held.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+								    struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_device *dev = mgr->dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	return drm_atomic_get_private_obj_state(state, mgr,
+						&mst_state_funcs);
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
 /**
  * drm_dp_mst_topology_mgr_init - initialise a topology manager
  * @mgr: manager struct to initialise
@@ -2980,6 +3124,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	if (test_calc_pbn_mode() < 0)
 		DRM_ERROR("MST PBN self-test failed\n");
 
+	mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
+	if (mgr->state == NULL)
+		return -ENOMEM;
+	mgr->state->mgr = mgr;
+
+	/* max. time slots - one slot for MTP header */
+	mgr->state->avail_slots = 63;
+	mgr->funcs = &mst_state_funcs;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
@@ -3000,6 +3153,9 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_unlock(&mgr->payload_lock);
 	mgr->dev = NULL;
 	mgr->aux = NULL;
+	kfree(mgr->state);
+	mgr->state = NULL;
+	mgr->funcs = NULL;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
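Taken together, the new exports form a small atomic API for MST bandwidth checks. Below is a hedged usage sketch, not part of this diff: my_connector, to_my_connector() and my_encoder_atomic_check() are hypothetical driver names, and only drm_dp_calc_pbn_mode() and the drm_dp_atomic_*_vcpi_slots() helpers come from the DRM MST code. It assumes the caller holds connection_mutex, as drm_atomic_get_mst_topology_state() expects.

#include <linux/kernel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_mst_helper.h>

/* Hypothetical MST connector wrapper, for illustration only. */
struct my_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
};

#define to_my_connector(c) container_of(c, struct my_connector, base)

static int my_encoder_atomic_check(struct drm_encoder *encoder,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct my_connector *conn = to_my_connector(conn_state->connector);
	int pbn, slots;

	/* PBN needed for the mode: pixel clock in kHz at 24 bits per pixel. */
	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);

	/*
	 * Reserve time slots in the duplicated topology state; this fails
	 * the atomic check cleanly with -ENOSPC when avail_slots cannot
	 * carry the stream on top of the already reserved ones.
	 */
	slots = drm_dp_atomic_find_vcpi_slots(state, conn->mst_mgr,
					      conn->mst_port, pbn);
	if (slots < 0)
		return slots;

	/*
	 * A disable path would instead hand its slot count back, since the
	 * port itself may already be unplugged (see the comment inside
	 * drm_dp_atomic_release_vcpi_slots()):
	 *
	 *	drm_dp_atomic_release_vcpi_slots(state, conn->mst_mgr, slots);
	 */
	return 0;
}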