| field | value | date |
|---|---|---|
| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2022-01-03 15:21:58 +0300 |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2022-01-03 15:21:58 +0300 |
| commit | e0d07ba76bd1b074505e4b3b32fe45c3b5d087bd (patch) | |
| tree | 096d715c6f1752c680b90cf5ef038de3177fa961 | |
| parent | e681a9d2050cdcaf24adeabc9f1aea6cff94be22 (diff) | |
| parent | fa487b2a900d7b22fe7db678d2134fbf56ae9da0 (diff) | |
| download | linux-e0d07ba76bd1b074505e4b3b32fe45c3b5d087bd.tar.xz | |
Merge tag 'thunderbolt-for-v5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next
Mika writes:
thunderbolt: Changes for v5.17 merge window
This includes the following Thunderbolt/USB4 changes for the v5.17 merge
window:
* Enable low-power link state (CL0s) for USB4 and Intel Titan Ridge
devices
* Add support for TMU (Time Management Unit) uni-directional mode
* Power management improvements (suspend-to-disk, runtime PM)
* USB4 compatibility fixes
* Minor fixes and cleanups.
All these have been in linux-next with no reported issues.
* tag 'thunderbolt-for-v5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
thunderbolt: Add module parameter for CLx disabling
thunderbolt: Enable CL0s for Intel Titan Ridge
thunderbolt: Rename Intel TB_VSE_CAP_IECS capability
thunderbolt: Implement TMU time disruption for Intel Titan Ridge
thunderbolt: Move usb4_switch_wait_for_bit() to switch.c
thunderbolt: Add CL0s support for USB4 routers
thunderbolt: Add TMU uni-directional mode
thunderbolt: Check return value of kmemdup() in icm_handle_event()
thunderbolt: Do not dereference fwnode in struct device
thunderbolt: Add debug logging of DisplayPort resource allocation
thunderbolt: Do not program path HopIDs for USB4 routers
thunderbolt: Do not allow subtracting more NFC credits than configured
thunderbolt: Runtime resume USB4 port when retimers are scanned
thunderbolt: Tear down existing tunnels when resuming from hibernate
thunderbolt: Runtime PM activate both ends of the device link
thunderbolt: xdomain: Avoid potential stack OOB read
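A practical note on the first item in the shortlog: the switch.c hunk below registers the new knob with module_param_named(clx, clx_enabled, bool, 0444). The 0444 permission makes it read-only at runtime, so the low-power link states can only be disabled at driver load, for example on the kernel command line (assuming the driver is built as the usual thunderbolt module):

    thunderbolt.clx=0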
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/thunderbolt/acpi.c | 15 |
| -rw-r--r-- | drivers/thunderbolt/icm.c | 7 |
| -rw-r--r-- | drivers/thunderbolt/lc.c | 24 |
| -rw-r--r-- | drivers/thunderbolt/path.c | 42 |
| -rw-r--r-- | drivers/thunderbolt/retimer.c | 28 |
| -rw-r--r-- | drivers/thunderbolt/switch.c | 493 |
| -rw-r--r-- | drivers/thunderbolt/tb.c | 91 |
| -rw-r--r-- | drivers/thunderbolt/tb.h | 106 |
| -rw-r--r-- | drivers/thunderbolt/tb_msgs.h | 47 |
| -rw-r--r-- | drivers/thunderbolt/tb_regs.h | 113 |
| -rw-r--r-- | drivers/thunderbolt/tmu.c | 337 |
| -rw-r--r-- | drivers/thunderbolt/tunnel.c | 27 |
| -rw-r--r-- | drivers/thunderbolt/tunnel.h | 9 |
| -rw-r--r-- | drivers/thunderbolt/usb4.c | 52 |
| -rw-r--r-- | drivers/thunderbolt/xdomain.c | 16 |
15 files changed, 1203 insertions, 204 deletions
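A recurring refactor in the diff below moves the register-polling helper out of usb4.c and generalizes it as tb_switch_wait_for_bit(). For readers outside the kernel tree, here is a minimal standalone C sketch of that deadline-polling pattern; read_reg() is a hypothetical stand-in for tb_sw_read(), and the 50 us sleep mirrors the kernel's usleep_range(50, 100):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Hypothetical register read; simulates hardware that sets the bit late. */
    static int read_reg(uint32_t offset, uint32_t *val)
    {
        static int calls;

        (void)offset;
        *val = (++calls > 3) ? 0x1 : 0x0;
        return 0;
    }

    static int64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Poll until (reg & bit) == value, or fail with -ETIMEDOUT. */
    static int wait_for_bit(uint32_t offset, uint32_t bit, uint32_t value,
                            int timeout_msec)
    {
        int64_t deadline = now_ns() + (int64_t)timeout_msec * 1000000LL;

        do {
            uint32_t val;
            int ret = read_reg(offset, &val);

            if (ret)
                return ret;
            if ((val & bit) == value)
                return 0;

            usleep(50); /* the kernel uses usleep_range(50, 100) */
        } while (now_ns() < deadline);

        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("wait_for_bit: %d\n", wait_for_bit(0x1c, 0x1, 0x1, 100));
        return 0;
    }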
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index b67e72d5644b..79b5abf9d042 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/pm_runtime.h>
 
 #include "tb.h"
 
@@ -31,7 +32,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
 		return AE_OK;
 
 	/* It needs to reference this NHI */
-	if (nhi->pdev->dev.fwnode != args.fwnode)
+	if (dev_fwnode(&nhi->pdev->dev) != args.fwnode)
 		goto out_put;
 
 	/*
@@ -74,8 +75,18 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
 		     pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
 		const struct device_link *link;
 
+		/*
+		 * Make them both active first to make sure the NHI does
+		 * not runtime suspend before the consumer. The
+		 * pm_runtime_put() below then allows the consumer to
+		 * runtime suspend again (which then allows NHI runtime
+		 * suspend too now that the device link is established).
+		 */
+		pm_runtime_get_sync(&pdev->dev);
+
 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
+				       DL_FLAG_RPM_ACTIVE |
 				       DL_FLAG_PM_RUNTIME);
 		if (link) {
 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
@@ -84,6 +95,8 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
 				 dev_name(&pdev->dev));
 		}
+
+		pm_runtime_put(&pdev->dev);
 	}
 
 out_put:
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 6255f1ef9599..fff0c740c8f3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1741,8 +1741,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
 	if (!n)
 		return;
 
-	INIT_WORK(&n->work, icm_handle_notification);
 	n->pkg = kmemdup(buf, size, GFP_KERNEL);
+	if (!n->pkg) {
+		kfree(n);
+		return;
+	}
+
+	INIT_WORK(&n->work, icm_handle_notification);
 	n->tb = tb;
 	queue_work(tb->wq, &n->work);
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index c178f0d7beab..53495a38b4eb 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -193,6 +193,30 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
 	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
 }
 
+/**
+ * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
+ * @port: Lane adapter
+ *
+ * TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
+ * active cables (if connected on the link).
+ */
+bool tb_lc_is_clx_supported(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	int cap, ret;
+	u32 val;
+
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return false;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
+	if (ret)
+		return false;
+
+	return !!(val & TB_LC_LINK_ATTR_CPS);
+}
+
 static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
 			      unsigned int flags)
 {
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 564e2f42cebd..299712accfe9 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -85,11 +85,12 @@ static int tb_path_find_src_hopid(struct tb_port *src,
 * @dst_hopid: HopID to the @dst (%-1 if don't care)
 * @last: Last port is filled here if not %NULL
 * @name: Name of the path
+ * @alloc_hopid: Allocate HopIDs for the ports
 *
 * Follows a path starting from @src and @src_hopid to the last output
- * port of the path. Allocates HopIDs for the visited ports. Call
- * tb_path_free() to release the path and allocated HopIDs when the path
- * is not needed anymore.
+ * port of the path. Allocates HopIDs for the visited ports (if
+ * @alloc_hopid is true). Call tb_path_free() to release the path and
+ * allocated HopIDs when the path is not needed anymore.
 *
 * Note function discovers also incomplete paths so caller should check
 * that the @dst port is the expected one. If it is not, the path can be
@@ -99,7 +100,8 @@ static int tb_path_find_src_hopid(struct tb_port *src,
 */
 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 				 struct tb_port *dst, int dst_hopid,
-				 struct tb_port **last, const char *name)
+				 struct tb_port **last, const char *name,
+				 bool alloc_hopid)
 {
 	struct tb_port *out_port;
 	struct tb_regs_hop hop;
@@ -156,6 +158,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 	path->tb = src->sw->tb;
 	path->path_length = num_hops;
 	path->activated = true;
+	path->alloc_hopid = alloc_hopid;
 
 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
 	if (!path->hops) {
@@ -177,13 +180,14 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 			goto err;
 		}
 
-		if (tb_port_alloc_in_hopid(p, h, h) < 0)
+		if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
 			goto err;
 
 		out_port = &sw->ports[hop.out_port];
 		next_hop = hop.next_hop;
 
-		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+		if (alloc_hopid &&
+		    tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
 			tb_port_release_in_hopid(p, h);
 			goto err;
 		}
@@ -263,6 +267,8 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 		return NULL;
 	}
 
+	path->alloc_hopid = true;
+
 	in_hopid = src_hopid;
 	out_port = NULL;
@@ -345,17 +351,19 @@ err:
 */
 void tb_path_free(struct tb_path *path)
 {
-	int i;
-
-	for (i = 0; i < path->path_length; i++) {
-		const struct tb_path_hop *hop = &path->hops[i];
-
-		if (hop->in_port)
-			tb_port_release_in_hopid(hop->in_port,
-						 hop->in_hop_index);
-		if (hop->out_port)
-			tb_port_release_out_hopid(hop->out_port,
-						  hop->next_hop_index);
+	if (path->alloc_hopid) {
+		int i;
+
+		for (i = 0; i < path->path_length; i++) {
+			const struct tb_path_hop *hop = &path->hops[i];
+
+			if (hop->in_port)
+				tb_port_release_in_hopid(hop->in_port,
+							 hop->in_hop_index);
+			if (hop->out_port)
+				tb_port_release_out_hopid(hop->out_port,
+							  hop->next_hop_index);
+		}
 	}
 
 	kfree(path->hops);
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 722694052f4a..8c29bd556ae0 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -324,15 +324,10 @@ struct device_type tb_retimer_type = {
 
 static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
 {
-	struct usb4_port *usb4;
 	struct tb_retimer *rt;
 	u32 vendor, device;
 	int ret;
 
-	usb4 = port->usb4;
-	if (!usb4)
-		return -EINVAL;
-
 	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
 				     sizeof(vendor));
 	if (ret) {
@@ -374,7 +369,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
 	rt->port = port;
 	rt->tb = port->sw->tb;
 
-	rt->dev.parent = &usb4->dev;
+	rt->dev.parent = &port->usb4->dev;
 	rt->dev.bus = &tb_bus_type;
 	rt->dev.type = &tb_retimer_type;
 	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
@@ -453,6 +448,13 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 {
 	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
 	int ret, i, last_idx = 0;
+	struct usb4_port *usb4;
+
+	usb4 = port->usb4;
+	if (!usb4)
+		return 0;
+
+	pm_runtime_get_sync(&usb4->dev);
 
 	/*
 	 * Send broadcast RT to make sure retimer indices facing this
@@ -460,7 +462,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 	 */
 	ret = usb4_port_enumerate_retimers(port);
 	if (ret)
-		return ret;
+		goto out;
 
 	/*
 	 * Enable sideband channel for each retimer. We can do this
@@ -490,8 +492,10 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 			break;
 	}
 
-	if (!last_idx)
-		return 0;
+	if (!last_idx) {
+		ret = 0;
+		goto out;
+	}
 
 	/* Add on-board retimers if they do not exist already */
 	for (i = 1; i <= last_idx; i++) {
@@ -507,7 +511,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 		}
 	}
 
-	return 0;
+out:
+	pm_runtime_mark_last_busy(&usb4->dev);
+	pm_runtime_put_autosuspend(&usb4->dev);
+
+	return ret;
 }
 
 static int remove_retimer(struct device *dev, void *data)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3014146081c1..d026e305fe5d 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -13,6 +13,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include "tb.h"
 
@@ -26,6 +27,10 @@ struct nvm_auth_status {
 	u32 status;
 };
 
+static bool clx_enabled = true;
+module_param_named(clx, clx_enabled, bool, 0444);
+MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
+
 /*
 * Hold NVM authentication failure status per switch This information
 * needs to stay around even when the switch gets power cycled so we
@@ -623,6 +628,9 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 		return 0;
 
 	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
+	if (credits < 0)
+		credits = max_t(int, -nfc_credits, credits);
+
 	nfc_credits += credits;
 
 	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
@@ -1319,7 +1327,9 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
- * Programs specified Hop IDs for DP IN/OUT port.
+ * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
+ * router DP adapters too but does not program the values as the fields
+ * are read-only.
 */
 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
@@ -1327,6 +1337,9 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
 	u32 data[2];
 	int ret;
 
+	if (tb_switch_is_usb4(port->sw))
+		return 0;
+
 	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			   ARRAY_SIZE(data));
 	if (ret)
@@ -1449,6 +1462,40 @@ int tb_switch_reset(struct tb_switch *sw)
 	return res.err;
 }
 
+/**
+ * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
+ * @sw: Router to read the offset value from
+ * @offset: Offset in the router config space to read from
+ * @bit: Bit mask in the offset to wait for
+ * @value: Value of the bits to wait for
+ * @timeout_msec: Timeout in ms how long to wait
+ *
+ * Wait till the specified bits in specified offset reach specified value.
+ * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
+ * within the given timeout or a negative errno in case of failure.
+ */
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+			   u32 value, int timeout_msec)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+	do {
+		u32 val;
+		int ret;
+
+		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			return ret;
+
+		if ((val & bit) == value)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
 /*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
@@ -2186,10 +2233,18 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	if (ret > 0)
 		sw->cap_plug_events = ret;
 
+	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
+	if (ret > 0)
+		sw->cap_vsec_tmu = ret;
+
 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
 	if (ret > 0)
 		sw->cap_lc = ret;
 
+	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
+	if (ret > 0)
+		sw->cap_lp = ret;
+
 	/* Root switch is always authorized */
 	if (!route)
 		sw->authorized = true;
@@ -2996,6 +3051,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
 
 	tb_sw_dbg(sw, "suspending switch\n");
 
+	/*
+	 * Actually only needed for Titan Ridge but for simplicity can be
+	 * done for USB4 device too as CLx is re-enabled at resume.
+	 */
+	if (tb_switch_disable_clx(sw, TB_CL0S))
+		tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
+
 	err = tb_plug_events_active(sw, false);
 	if (err)
 		return;
@@ -3048,9 +3110,20 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
 */
 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	int ret;
+
 	if (tb_switch_is_usb4(sw))
-		return usb4_switch_alloc_dp_resource(sw, in);
-	return tb_lc_dp_sink_alloc(sw, in);
+		ret = usb4_switch_alloc_dp_resource(sw, in);
+	else
+		ret = tb_lc_dp_sink_alloc(sw, in);
+
+	if (ret)
+		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
+			   in->port);
+	else
+		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
+
+	return ret;
 }
 
 /**
@@ -3073,6 +3146,8 @@ void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 	if (ret)
 		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
 			   in->port);
+	else
+		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
 }
 
 struct tb_sw_lookup {
@@ -3202,3 +3277,415 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
 
 	return NULL;
 }
+
+static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+	u32 phy;
+	int ret;
+
+	ret = tb_port_read(port, &phy, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	if (secondary)
+		phy |= LANE_ADP_CS_1_PMS;
+	else
+		phy &= ~LANE_ADP_CS_1_PMS;
+
+	return tb_port_write(port, &phy, TB_CFG_PORT,
+			     port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+	return __tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+	return __tb_port_pm_secondary_set(port, false);
+}
+
+static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *up, *down;
+	int ret;
+
+	if (!tb_route(sw))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), parent);
+	ret = tb_port_pm_secondary_enable(up);
+	if (ret)
+		return ret;
+
+	return tb_port_pm_secondary_disable(down);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
+{
+	u32 mask, val;
+	bool ret;
+
+	/* Don't enable CLx in case of two single-lane links */
+	if (!port->bonded && port->dual_link_port)
+		return false;
+
+	/* Don't enable CLx in case of inter-domain link */
+	if (port->xdomain)
+		return false;
+
+	if (tb_switch_is_usb4(port->sw)) {
+		if (!usb4_port_clx_supported(port))
+			return false;
+	} else if (!tb_lc_is_clx_supported(port)) {
+		return false;
+	}
+
+	switch (clx) {
+	case TB_CL0S:
+		/* CL0s support requires also CL1 support */
+		mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
+		break;
+
+	/* For now we support only CL0s. Not CL1, CL2 */
+	case TB_CL1:
+	case TB_CL2:
+	default:
+		return false;
+	}
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_0, 1);
+	if (ret)
+		return false;
+
+	return !!(val & mask);
+}
+
+static inline bool tb_port_cl0s_supported(struct tb_port *port)
+{
+	return tb_port_clx_supported(port, TB_CL0S);
+}
+
+static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
+{
+	u32 phy, mask;
+	int ret;
+
+	/* To enable CL0s also required to enable CL1 */
+	mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+	ret = tb_port_read(port, &phy, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	if (enable)
+		phy |= mask;
+	else
+		phy &= ~mask;
+
+	return tb_port_write(port, &phy, TB_CFG_PORT,
+			     port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_cl0s_disable(struct tb_port *port)
+{
+	return __tb_port_cl0s_set(port, false);
+}
+
+static int tb_port_cl0s_enable(struct tb_port *port)
+{
+	return __tb_port_cl0s_set(port, true);
+}
+
+static int tb_switch_enable_cl0s(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	bool up_cl0s_support, down_cl0s_support;
+	struct tb_port *up, *down;
+	int ret;
+
+	if (!tb_switch_is_clx_supported(sw))
+		return 0;
+
+	/*
+	 * Enable CLx for host router's downstream port as part of the
+	 * downstream router enabling procedure.
+	 */
+	if (!tb_route(sw))
+		return 0;
+
+	/* Enable CLx only for first hop router (depth = 1) */
+	if (tb_route(parent))
+		return 0;
+
+	ret = tb_switch_pm_secondary_resolve(sw);
+	if (ret)
+		return ret;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), parent);
+
+	up_cl0s_support = tb_port_cl0s_supported(up);
+	down_cl0s_support = tb_port_cl0s_supported(down);
+
+	tb_port_dbg(up, "CL0s %ssupported\n",
+		    up_cl0s_support ? "" : "not ");
+	tb_port_dbg(down, "CL0s %ssupported\n",
+		    down_cl0s_support ? "" : "not ");
+
+	if (!up_cl0s_support || !down_cl0s_support)
+		return -EOPNOTSUPP;
+
+	ret = tb_port_cl0s_enable(up);
+	if (ret)
+		return ret;
+
+	ret = tb_port_cl0s_enable(down);
+	if (ret) {
+		tb_port_cl0s_disable(up);
+		return ret;
+	}
+
+	ret = tb_switch_mask_clx_objections(sw);
+	if (ret) {
+		tb_port_cl0s_disable(up);
+		tb_port_cl0s_disable(down);
+		return ret;
+	}
+
+	sw->clx = TB_CL0S;
+
+	tb_port_dbg(up, "CL0s enabled\n");
+	return 0;
+}
+
+/**
+ * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
+ * @sw: Router to enable CLx for
+ * @clx: The CLx state to enable
+ *
+ * Enable CLx state only for first hop router. That is the most common
+ * use-case, that is intended for better thermal management, and so helps
+ * to improve performance. CLx is enabled only if both sides of the link
+ * support CLx, and if both sides of the link are not configured as two
+ * single lane links and only if the link is not inter-domain link. The
+ * complete set of conditions is described in CM Guide 1.0 section 8.1.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+	struct tb_switch *root_sw = sw->tb->root_switch;
+
+	if (!clx_enabled)
+		return 0;
+
+	/*
+	 * CLx is not enabled and validated on Intel USB4 platforms before
+	 * Alder Lake.
+	 */
+	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
+		return 0;
+
+	switch (clx) {
+	case TB_CL0S:
+		return tb_switch_enable_cl0s(sw);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int tb_switch_disable_cl0s(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *up, *down;
+	int ret;
+
+	if (!tb_switch_is_clx_supported(sw))
+		return 0;
+
+	/*
+	 * Disable CLx for host router's downstream port as part of the
+	 * downstream router enabling procedure.
+	 */
+	if (!tb_route(sw))
+		return 0;
+
+	/* Disable CLx only for first hop router (depth = 1) */
+	if (tb_route(parent))
+		return 0;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), parent);
+	ret = tb_port_cl0s_disable(up);
+	if (ret)
+		return ret;
+
+	ret = tb_port_cl0s_disable(down);
+	if (ret)
+		return ret;
+
+	sw->clx = TB_CLX_DISABLE;
+
+	tb_port_dbg(up, "CL0s disabled\n");
+	return 0;
+}
+
+/**
+ * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
+ * @sw: Router to disable CLx for
+ * @clx: The CLx state to disable
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+	if (!clx_enabled)
+		return 0;
+
+	switch (clx) {
+	case TB_CL0S:
+		return tb_switch_disable_cl0s(sw);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * tb_switch_mask_clx_objections() - Mask CLx objections for a router
+ * @sw: Router to mask objections for
+ *
+ * Mask the objections coming from the second depth routers in order to
+ * stop these objections from interfering with the CLx states of the first
+ * depth link.
+ */
+int tb_switch_mask_clx_objections(struct tb_switch *sw)
+{
+	int up_port = sw->config.upstream_port_number;
+	u32 offset, val[2], mask_obj, unmask_obj;
+	int ret, i;
+
+	/* Only Titan Ridge of pre-USB4 devices support CLx states */
+	if (!tb_switch_is_titan_ridge(sw))
+		return 0;
+
+	if (!tb_route(sw))
+		return 0;
+
+	/*
+	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
+	 * Port A consists of lane adapters 1,2 and
+	 * Port B consists of lane adapters 3,4
+	 * If upstream port is A, (lanes are 1,2), we mask objections from
+	 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
+	 */
+	if (up_port == 1) {
+		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+		offset = TB_LOW_PWR_C1_CL1;
+	} else {
+		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+		offset = TB_LOW_PWR_C3_CL1;
+	}
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_lp + offset, ARRAY_SIZE(val));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(val); i++) {
+		val[i] |= mask_obj;
+		val[i] &= ~unmask_obj;
+	}
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->cap_lp + offset, ARRAY_SIZE(val));
+}
+
+/*
+ * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
+ * device. For now used only for Titan Ridge.
+ */
+static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
+				       unsigned int pcie_offset, u32 value)
+{
+	u32 offset, command, val;
+	int ret;
+
+	if (sw->generation != 3)
+		return -EOPNOTSUPP;
+
+	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
+	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
+	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
+	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
+	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
+			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
+	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
+
+	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
+
+	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_wait_for_bit(sw, offset,
+				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/**
+ * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
+ * @sw: Router to enable PCIe L1
+ *
+ * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable
+ * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
+ * was configured. Due to Intel platforms limitation, shall be called only
+ * for first hop switch.
+ */
+int tb_switch_pcie_l1_enable(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	int ret;
+
+	if (!tb_route(sw))
+		return 0;
+
+	if (!tb_switch_is_titan_ridge(sw))
+		return 0;
+
+	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
+	if (tb_route(parent))
+		return 0;
+
+	/* Write to downstream PCIe bridge #5 aka Dn4 */
+	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
+	if (ret)
+		return ret;
+
+	/* Write to Upstream PCIe bridge #0 aka Up0 */
+	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 2897a77d44c3..cbd0ad85ffb1 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -105,10 +105,11 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
 	}
 }
 
-static void tb_discover_tunnels(struct tb_switch *sw)
+static void tb_switch_discover_tunnels(struct tb_switch *sw,
+				       struct list_head *list,
+				       bool alloc_hopids)
 {
 	struct tb *tb = sw->tb;
-	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_port *port;
 
 	tb_switch_for_each_port(sw, port) {
@@ -116,24 +117,41 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 
 		switch (port->config.type) {
 		case TB_TYPE_DP_HDMI_IN:
-			tunnel = tb_tunnel_discover_dp(tb, port);
+			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
 			break;
 
 		case TB_TYPE_PCIE_DOWN:
-			tunnel = tb_tunnel_discover_pci(tb, port);
+			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
 			break;
 
 		case TB_TYPE_USB3_DOWN:
-			tunnel = tb_tunnel_discover_usb3(tb, port);
+			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
 			break;
 
 		default:
 			break;
 		}
 
-		if (!tunnel)
-			continue;
+		if (tunnel)
+			list_add_tail(&tunnel->list, list);
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port)) {
+			tb_switch_discover_tunnels(port->remote->sw, list,
+						   alloc_hopids);
+		}
+	}
+}
 
+static void tb_discover_tunnels(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
 		if (tb_tunnel_is_pci(tunnel)) {
 			struct tb_switch *parent = tunnel->dst_port->sw;
@@ -146,13 +164,6 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
 			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
 		}
-
-		list_add_tail(&tunnel->list, &tcm->tunnel_list);
-	}
-
-	tb_switch_for_each_port(sw, port) {
-		if (tb_port_has_remote(port))
-			tb_discover_tunnels(port->remote->sw);
 	}
 }
@@ -210,7 +221,7 @@ static int tb_enable_tmu(struct tb_switch *sw)
 	int ret;
 
 	/* If it is already enabled in correct mode, don't touch it */
-	if (tb_switch_tmu_is_enabled(sw))
+	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
 		return 0;
 
 	ret = tb_switch_tmu_disable(sw);
@@ -658,6 +669,11 @@ static void tb_scan_port(struct tb_port *port)
 		tb_switch_lane_bonding_enable(sw);
 	/* Set the link configured */
 	tb_switch_configure_link(sw);
+	if (tb_switch_enable_clx(sw, TB_CL0S))
+		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
+
+	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
+				tb_switch_is_clx_enabled(sw));
 	if (tb_enable_tmu(sw))
 		tb_sw_warn(sw, "failed to enable TMU\n");
@@ -1076,6 +1092,13 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 		return -EIO;
 	}
 
+	/*
+	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
+	 * here.
+	 */
+	if (tb_switch_pcie_l1_enable(sw))
+		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
+
 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
 	return 0;
 }
@@ -1364,12 +1387,13 @@ static int tb_start(struct tb *tb)
 		return ret;
 	}
 
+	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
 	/* Enable TMU if it is off */
 	tb_switch_tmu_enable(tb->root_switch);
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	/* Find out tunnels created by the boot firmware */
-	tb_discover_tunnels(tb->root_switch);
+	tb_discover_tunnels(tb);
 	/*
 	 * If the boot firmware did not create USB 3.x tunnels create them
 	 * now for the whole topology.
@@ -1407,6 +1431,14 @@ static void tb_restore_children(struct tb_switch *sw)
 	if (sw->is_unplugged)
 		return;
 
+	if (tb_switch_enable_clx(sw, TB_CL0S))
+		tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
+
+	/*
+	 * tb_switch_tmu_configure() was already called when the switch was
+	 * added before entering system sleep or runtime suspend,
+	 * so no need to call it again before enabling TMU.
+	 */
 	if (tb_enable_tmu(sw))
 		tb_sw_warn(sw, "failed to restore TMU configuration\n");
@@ -1429,6 +1461,8 @@ static int tb_resume_noirq(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_tunnel *tunnel, *n;
+	unsigned int usb3_delay = 0;
+	LIST_HEAD(tunnels);
 
 	tb_dbg(tb, "resuming...\n");
@@ -1439,8 +1473,31 @@ static int tb_resume_noirq(struct tb *tb)
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
 	tb_restore_children(tb->root_switch);
-	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+
+	/*
+	 * If we get here from suspend to disk the boot firmware or the
+	 * restore kernel might have created tunnels of its own. Since
+	 * we cannot be sure they are usable for us we find and tear
+	 * them down.
+	 */
+	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
+	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
+		if (tb_tunnel_is_usb3(tunnel))
+			usb3_delay = 500;
+		tb_tunnel_deactivate(tunnel);
+		tb_tunnel_free(tunnel);
+	}
+
+	/* Re-create our tunnels now */
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		/* USB3 requires delay before it can be re-activated */
+		if (tb_tunnel_is_usb3(tunnel)) {
+			msleep(usb3_delay);
+			/* Only need to do it once */
+			usb3_delay = 0;
+		}
 		tb_tunnel_restart(tunnel);
+	}
 
 	if (!list_empty(&tcm->tunnel_list)) {
 		/*
 		 * the pcie links need some time to get going.
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 725104c83e3d..74d3b14f004e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -89,15 +89,31 @@ enum tb_switch_tmu_rate {
 * @cap: Offset to the TMU capability (%0 if not found)
 * @has_ucap: Does the switch support uni-directional mode
 * @rate: TMU refresh rate related to upstream switch. In case of root
- *	 switch this holds the domain rate.
+ *	 switch this holds the domain rate. Reflects the HW setting.
 * @unidirectional: Is the TMU in uni-directional or bi-directional mode
- *		    related to upstream switch. Don't case for root switch.
+ *		    related to upstream switch. Don't care for root switch.
+ *		    Reflects the HW setting.
+ * @unidirectional_request: Is the new TMU mode: uni-directional or bi-directional
+ *			    that is requested to be set. Related to upstream switch.
+ *			    Don't care for root switch.
+ * @rate_request: TMU new refresh rate related to upstream switch that is
+ *		  requested to be set. In case of root switch, this holds
+ *		  the new domain rate that is requested to be set.
 */
 struct tb_switch_tmu {
 	int cap;
 	bool has_ucap;
 	enum tb_switch_tmu_rate rate;
 	bool unidirectional;
+	bool unidirectional_request;
+	enum tb_switch_tmu_rate rate_request;
+};
+
+enum tb_clx {
+	TB_CLX_DISABLE,
+	TB_CL0S,
+	TB_CL1,
+	TB_CL2,
 };
@@ -122,7 +138,9 @@ struct tb_switch_tmu {
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
+ * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -148,6 +166,7 @@ struct tb_switch_tmu {
 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
 * @max_pcie_credits: Router preferred number of buffers for PCIe
 * @max_dma_credits: Router preferred number of buffers for DMA/P2P
+ * @clx: CLx state on the upstream link of the router
 *
 * When the switch is being added or removed to the domain (other
 * switches) you need to have domain lock held.
@@ -172,7 +191,9 @@ struct tb_switch {
 	bool link_usb4;
 	unsigned int generation;
 	int cap_plug_events;
+	int cap_vsec_tmu;
 	int cap_lc;
+	int cap_lp;
 	bool is_unplugged;
 	u8 *drom;
 	struct tb_nvm *nvm;
@@ -196,6 +217,7 @@ struct tb_switch {
 	unsigned int min_dp_main_credits;
 	unsigned int max_pcie_credits;
 	unsigned int max_dma_credits;
+	enum tb_clx clx;
 };
@@ -354,6 +376,7 @@ enum tb_path_port {
 *	     when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
+ * @alloc_hopid: Does this path consume port HopID
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
@@ -374,6 +397,7 @@ struct tb_path {
 	bool clear_fc;
 	struct tb_path_hop *hops;
 	int path_length;
+	bool alloc_hopid;
 };
 
 /* HopIDs 0-7 are reserved by the Thunderbolt protocol */
@@ -740,6 +764,8 @@ void tb_switch_remove(struct tb_switch *sw);
 void tb_switch_suspend(struct tb_switch *sw, bool runtime);
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb_switch *sw);
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+			   u32 value, int timeout_msec);
 void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
 				    enum tb_port_type type);
@@ -851,6 +877,20 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
 	return false;
 }
 
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (sw->config.device_id) {
+		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+		case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+		case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+			return true;
+		}
+	}
+	return false;
+}
+
 /**
 * tb_switch_is_usb4() - Is the switch USB4 compliant
 * @sw: Switch to check
@@ -889,13 +929,64 @@ int tb_switch_tmu_init(struct tb_switch *sw);
 int tb_switch_tmu_post_time(struct tb_switch *sw);
 int tb_switch_tmu_disable(struct tb_switch *sw);
 int tb_switch_tmu_enable(struct tb_switch *sw);
-
-static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+void tb_switch_tmu_configure(struct tb_switch *sw,
+			     enum tb_switch_tmu_rate rate,
+			     bool unidirectional);
+/**
+ * tb_switch_tmu_hifi_is_enabled() - Checks if the specified TMU mode is enabled
+ * @sw: Router whose TMU mode to check
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Return true if hardware TMU configuration matches the one passed in
+ * as parameter. That is HiFi and either uni-directional or bi-directional.
+ */
+static inline bool tb_switch_tmu_hifi_is_enabled(const struct tb_switch *sw,
+						 bool unidirectional)
 {
 	return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
-	       !sw->tmu.unidirectional;
+	       sw->tmu.unidirectional == unidirectional;
 }
 
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
+
+/**
+ * tb_switch_is_clx_enabled() - Checks if the CLx is enabled
+ * @sw: Router to check the CLx state for
+ *
+ * Checks if the CLx is enabled on the router upstream link.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw)
+{
+	return sw->clx != TB_CLX_DISABLE;
+}
+
+/**
+ * tb_switch_is_cl0s_enabled() - Checks if the CL0s is enabled
+ * @sw: Router to check for the CL0s
+ *
+ * Checks if the CL0s is enabled on the router upstream link.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
+{
+	return sw->clx == TB_CL0S;
+}
+
+/**
+ * tb_switch_is_clx_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+{
+	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+int tb_switch_mask_clx_objections(struct tb_switch *sw);
+
+int tb_switch_pcie_l1_enable(struct tb_switch *sw);
+
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -957,7 +1048,8 @@ int tb_dp_port_enable(struct tb_port *port, bool enable);
 
 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 				 struct tb_port *dst, int dst_hopid,
-				 struct tb_port **last, const char *name);
+				 struct tb_port **last, const char *name,
+				 bool alloc_hopid);
 struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 			      struct tb_port *dst, int dst_hopid, int link_nr,
 			      const char *name);
@@ -988,6 +1080,7 @@ void tb_lc_unconfigure_port(struct tb_port *port);
 int tb_lc_configure_xdomain(struct tb_port *port);
 void tb_lc_unconfigure_xdomain(struct tb_port *port);
 int tb_lc_start_lane_initialization(struct tb_port *port);
+bool tb_lc_is_clx_supported(struct tb_port *port);
 int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
 int tb_lc_set_sleep(struct tb_switch *sw);
 bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
@@ -1074,6 +1167,7 @@ void usb4_port_unconfigure_xdomain(struct tb_port *port);
 int usb4_port_router_offline(struct tb_port *port);
 int usb4_port_router_online(struct tb_port *port);
 int usb4_port_enumerate_retimers(struct tb_port *port);
+bool usb4_port_clx_supported(struct tb_port *port);
 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index bcabfcb2fd03..fe1afa44c56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -535,15 +535,25 @@ struct tb_xdp_header {
 	u32 type;
 };
 
+struct tb_xdp_error_response {
+	struct tb_xdp_header hdr;
+	u32 error;
+};
+
 struct tb_xdp_uuid {
 	struct tb_xdp_header hdr;
 };
 
 struct tb_xdp_uuid_response {
-	struct tb_xdp_header hdr;
-	uuid_t src_uuid;
-	u32 src_route_hi;
-	u32 src_route_lo;
+	union {
+		struct tb_xdp_error_response err;
+		struct {
+			struct tb_xdp_header hdr;
+			uuid_t src_uuid;
+			u32 src_route_hi;
+			u32 src_route_lo;
+		};
+	};
 };
 
 struct tb_xdp_properties {
@@ -555,13 +565,18 @@ struct tb_xdp_properties {
 };
 
 struct tb_xdp_properties_response {
-	struct tb_xdp_header hdr;
-	uuid_t src_uuid;
-	uuid_t dst_uuid;
-	u16 offset;
-	u16 data_length;
-	u32 generation;
-	u32 data[0];
+	union {
+		struct tb_xdp_error_response err;
+		struct {
+			struct tb_xdp_header hdr;
+			uuid_t src_uuid;
+			uuid_t dst_uuid;
+			u16 offset;
+			u16 data_length;
+			u32 generation;
+			u32 data[];
+		};
+	};
 };
 
 /*
@@ -580,7 +595,10 @@ struct tb_xdp_properties_changed {
 };
 
 struct tb_xdp_properties_changed_response {
-	struct tb_xdp_header hdr;
+	union {
+		struct tb_xdp_error_response err;
+		struct tb_xdp_header hdr;
+	};
 };
 
 enum tb_xdp_error {
@@ -591,9 +609,4 @@ enum tb_xdp_error {
 	ERROR_NOT_READY,
 };
 
-struct tb_xdp_error_response {
-	struct tb_xdp_header hdr;
-	u32 error;
-};
-
 #endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 484f25be2849..a74f4878d3e7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -33,7 +33,7 @@ enum tb_switch_cap {
 enum tb_switch_vse_cap {
 	TB_VSE_CAP_PLUG_EVENTS		= 0x01,	/* also EEPROM */
 	TB_VSE_CAP_TIME2		= 0x03,
-	TB_VSE_CAP_IECS			= 0x04,
+	TB_VSE_CAP_CP_LP		= 0x04,
 	TB_VSE_CAP_LINK_CONTROLLER	= 0x06,	/* also IECS */
 };
@@ -246,6 +246,7 @@ enum usb4_switch_op {
 #define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT	16
 #define TMU_RTR_CS_22			0x16
 #define TMU_RTR_CS_24			0x18
+#define TMU_RTR_CS_25			0x19
 
 enum tb_port_type {
 	TB_TYPE_INACTIVE	= 0x000000,
@@ -305,16 +306,22 @@ struct tb_regs_port_header {
 /* TMU adapter registers */
 #define TMU_ADP_CS_3			0x03
 #define TMU_ADP_CS_3_UDM		BIT(29)
+#define TMU_ADP_CS_6			0x06
+#define TMU_ADP_CS_6_DTS		BIT(1)
 
 /* Lane adapter registers */
 #define LANE_ADP_CS_0			0x00
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK	GENMASK(25, 20)
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT	20
+#define LANE_ADP_CS_0_CL0S_SUPPORT	BIT(26)
+#define LANE_ADP_CS_0_CL1_SUPPORT	BIT(27)
 #define LANE_ADP_CS_1			0x01
 #define LANE_ADP_CS_1_TARGET_WIDTH_MASK	GENMASK(9, 4)
 #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT	4
 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE	0x1
 #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL	0x3
+#define LANE_ADP_CS_1_CL0S_ENABLE	BIT(10)
+#define LANE_ADP_CS_1_CL1_ENABLE	BIT(11)
 #define LANE_ADP_CS_1_LD		BIT(14)
 #define LANE_ADP_CS_1_LB		BIT(15)
 #define LANE_ADP_CS_1_CURRENT_SPEED_MASK	GENMASK(19, 16)
@@ -323,6 +330,7 @@ struct tb_regs_port_header {
 #define LANE_ADP_CS_1_CURRENT_SPEED_GEN3	0x4
 #define LANE_ADP_CS_1_CURRENT_WIDTH_MASK	GENMASK(25, 20)
 #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT	20
+#define LANE_ADP_CS_1_PMS		BIT(30)
 
 /* USB4 port registers */
 #define PORT_CS_1			0x01
@@ -338,6 +346,7 @@ struct tb_regs_port_header {
 #define PORT_CS_18			0x12
 #define PORT_CS_18_BE			BIT(8)
 #define PORT_CS_18_TCM			BIT(9)
+#define PORT_CS_18_CPS			BIT(10)
 #define PORT_CS_18_WOU4S		BIT(18)
 #define PORT_CS_19			0x13
 #define PORT_CS_19_PC			BIT(3)
@@ -437,39 +446,79 @@ struct tb_regs_hop {
 	u32 unknown3:3;		/* set to zero */
 } __packed;
 
+/* TMU Thunderbolt 3 registers */
+#define TB_TIME_VSEC_3_CS_9		0x9
+#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK	GENMASK(17, 16)
+#define TB_TIME_VSEC_3_CS_26		0x1a
+#define TB_TIME_VSEC_3_CS_26_TD		BIT(22)
+
+/*
+ * Used for Titan Ridge only. Bits are part of the same register: TMU_ADP_CS_6
+ * (see above) as in USB4 spec, but these specific bits used for Titan Ridge
+ * only and reserved in USB4 spec.
+ */
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK	GENMASK(3, 2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1	BIT(2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2	BIT(3)
+
+/* Plug Events registers */
+#define TB_PLUG_EVENTS_PCIE_WR_DATA		0x1b
+#define TB_PLUG_EVENTS_PCIE_CMD			0x1c
+#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK	GENMASK(9, 0)
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT	10
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK		GENMASK(17, 10)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK	BIT(21)
+#define TB_PLUG_EVENTS_PCIE_CMD_WR		0x1
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT	22
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK	GENMASK(24, 22)
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL	0x2
+#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK	BIT(30)
+#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK	BIT(31)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA		0x1d
+
+/* CP Low Power registers */
+#define TB_LOW_PWR_C1_CL1		0x1
+#define TB_LOW_PWR_C1_CL1_OBJ_MASK	GENMASK(4, 1)
+#define TB_LOW_PWR_C1_CL2_OBJ_MASK	GENMASK(4, 1)
+#define TB_LOW_PWR_C1_PORT_A_MASK	GENMASK(2, 1)
+#define TB_LOW_PWR_C0_PORT_B_MASK	GENMASK(4, 3)
+#define TB_LOW_PWR_C3_CL1		0x3
+
 /* Common link controller registers */
-#define TB_LC_DESC			0x02
-#define TB_LC_DESC_NLC_MASK		GENMASK(3, 0)
-#define TB_LC_DESC_SIZE_SHIFT		8
-#define TB_LC_DESC_SIZE_MASK		GENMASK(15, 8)
-#define TB_LC_DESC_PORT_SIZE_SHIFT	16
-#define TB_LC_DESC_PORT_SIZE_MASK	GENMASK(27, 16)
-#define TB_LC_FUSE			0x03
-#define TB_LC_SNK_ALLOCATION		0x10
-#define TB_LC_SNK_ALLOCATION_SNK0_MASK	GENMASK(3, 0)
-#define TB_LC_SNK_ALLOCATION_SNK0_CM	0x1
-#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT	4
-#define TB_LC_SNK_ALLOCATION_SNK1_MASK	GENMASK(7, 4)
-#define TB_LC_SNK_ALLOCATION_SNK1_CM	0x1
-#define TB_LC_POWER			0x740
+#define TB_LC_DESC				0x02
+#define TB_LC_DESC_NLC_MASK			GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT			8
+#define TB_LC_DESC_SIZE_MASK			GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT		16
+#define TB_LC_DESC_PORT_SIZE_MASK		GENMASK(27, 16)
+#define TB_LC_FUSE				0x03
+#define TB_LC_SNK_ALLOCATION			0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK		GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM		0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT		4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK		GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM		0x1
+#define TB_LC_POWER				0x740
 
 /* Link controller registers */
-#define TB_LC_PORT_ATTR			0x8d
-#define TB_LC_PORT_ATTR_BE		BIT(12)
-
-#define TB_LC_SX_CTRL			0x96
-#define TB_LC_SX_CTRL_WOC		BIT(1)
-#define TB_LC_SX_CTRL_WOD		BIT(2)
-#define TB_LC_SX_CTRL_WODPC		BIT(3)
-#define TB_LC_SX_CTRL_WODPD		BIT(4)
-#define TB_LC_SX_CTRL_WOU4		BIT(5)
-#define TB_LC_SX_CTRL_WOP		BIT(6)
-#define TB_LC_SX_CTRL_L1C		BIT(16)
-#define TB_LC_SX_CTRL_L1D		BIT(17)
-#define TB_LC_SX_CTRL_L2C		BIT(20)
-#define TB_LC_SX_CTRL_L2D		BIT(21)
-#define TB_LC_SX_CTRL_SLI		BIT(29)
-#define TB_LC_SX_CTRL_UPSTREAM		BIT(30)
-#define TB_LC_SX_CTRL_SLP		BIT(31)
+#define TB_LC_PORT_ATTR				0x8d
+#define TB_LC_PORT_ATTR_BE			BIT(12)
+
+#define TB_LC_SX_CTRL				0x96
+#define TB_LC_SX_CTRL_WOC			BIT(1)
+#define TB_LC_SX_CTRL_WOD			BIT(2)
+#define TB_LC_SX_CTRL_WODPC			BIT(3)
+#define TB_LC_SX_CTRL_WODPD			BIT(4)
+#define TB_LC_SX_CTRL_WOU4			BIT(5)
+#define TB_LC_SX_CTRL_WOP			BIT(6)
+#define TB_LC_SX_CTRL_L1C			BIT(16)
+#define TB_LC_SX_CTRL_L1D			BIT(17)
+#define TB_LC_SX_CTRL_L2C			BIT(20)
+#define TB_LC_SX_CTRL_L2D			BIT(21)
+#define TB_LC_SX_CTRL_SLI			BIT(29)
+#define TB_LC_SX_CTRL_UPSTREAM			BIT(30)
+#define TB_LC_SX_CTRL_SLP			BIT(31)
+#define TB_LC_LINK_ATTR				0x97
+#define TB_LC_LINK_ATTR_CPS			BIT(18)
 
 #endif
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 039c42a06000..e4a07a26f693 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -115,6 +115,11 @@ static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
 	return tb_port_tmu_set_unidirectional(port, false);
 }
 
+static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
+{
+	return tb_port_tmu_set_unidirectional(port, true);
+}
+
 static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
 {
 	int ret;
@@ -128,23 +133,46 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
 	return val & TMU_ADP_CS_3_UDM;
 }
 
+static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
+{
+	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
+
+	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
+}
+
+static int tb_port_tmu_time_sync_disable(struct tb_port *port)
+{
+	return tb_port_tmu_time_sync(port, true);
+}
+
+static int tb_port_tmu_time_sync_enable(struct tb_port *port)
+{
+	return tb_port_tmu_time_sync(port, false);
+}
+
 static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
 {
+	u32 val, offset, bit;
 	int ret;
-	u32 val;
 
-	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
-			 sw->tmu.cap + TMU_RTR_CS_0, 1);
+	if (tb_switch_is_usb4(sw)) {
+		offset = sw->tmu.cap + TMU_RTR_CS_0;
+		bit = TMU_RTR_CS_0_TD;
+	} else {
+		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
+		bit = TB_TIME_VSEC_3_CS_26_TD;
+	}
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
 	if (ret)
 		return ret;
 
 	if (set)
-		val |= TMU_RTR_CS_0_TD;
+		val |= bit;
 	else
-		val &= ~TMU_RTR_CS_0_TD;
+		val &= ~bit;
 
-	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
-			   sw->tmu.cap + TMU_RTR_CS_0, 1);
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
 }
 
 /**
@@ -207,7 +235,8 @@ int tb_switch_tmu_init(struct tb_switch *sw)
 */
 int tb_switch_tmu_post_time(struct tb_switch *sw)
 {
-	unsigned int post_local_time_offset, post_time_offset;
+	unsigned int post_time_high_offset, post_time_high = 0;
+	unsigned int post_local_time_offset, post_time_offset;
 	struct tb_switch *root_switch = sw->tb->root_switch;
 	u64 hi, mid, lo, local_time, post_time;
 	int i, ret, retries = 100;
@@ -247,6 +276,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
 
 	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
 	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;
 
 	/*
 	 * Write the Grandmaster time to the Post Local Time registers
@@ -258,17 +288,24 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
 		goto out;
 
 	/*
-	 * Have the new switch update its local time (by writing 1 to
-	 * the post_time registers) and wait for the completion of the
-	 * same (post_time register becomes 0). This means the time has
-	 * been converged properly.
+	 * Have the new switch update its local time by:
+	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
+	 * Post Time High register.
+	 * 2) write 0 to Post Time High register and then wait for
+	 * the completion of the post_time register becomes 0.
+	 * This means the time has been converged properly.
	 */
-	post_time = 1;
+	post_time = 0xffffffff00000001ULL;
 
 	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
 	if (ret)
 		goto out;
 
+	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
+			  post_time_high_offset, 1);
+	if (ret)
+		goto out;
+
 	do {
 		usleep_range(5, 10);
 		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
@@ -297,30 +334,54 @@ out:
 */
 int tb_switch_tmu_disable(struct tb_switch *sw)
 {
-	int ret;
-
-	if (!tb_switch_is_usb4(sw))
+	/*
+	 * No need to disable TMU on devices that don't support CLx since
+	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
+	 * HiFi bi-directional is enabled by default and we don't change it.
+	 */
+	if (!tb_switch_is_clx_supported(sw))
 		return 0;
 
 	/* Already disabled? */
 	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
 		return 0;
 
-	if (sw->tmu.unidirectional) {
+
+	if (tb_route(sw)) {
+		bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
 		struct tb_switch *parent = tb_switch_parent(sw);
-		struct tb_port *up, *down;
+		struct tb_port *down, *up;
+		int ret;
 
-		up = tb_upstream_port(sw);
 		down = tb_port_at(tb_route(sw), parent);
-
-		/* The switch may be unplugged so ignore any errors */
-		tb_port_tmu_unidirectional_disable(up);
-		ret = tb_port_tmu_unidirectional_disable(down);
+		up = tb_upstream_port(sw);
+		/*
+		 * In case of uni-directional time sync, TMU handshake is
+		 * initiated by upstream router. In case of bi-directional
+		 * time sync, TMU handshake is initiated by downstream router.
+		 * Therefore, we change the rate to off in the respective
+		 * router.
+		 */
+		if (unidirectional)
+			tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+		else
+			tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+		tb_port_tmu_time_sync_disable(up);
+		ret = tb_port_tmu_time_sync_disable(down);
 		if (ret)
 			return ret;
-	}
 
-	tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+		if (unidirectional) {
+			/* The switch may be unplugged so ignore any errors */
+			tb_port_tmu_unidirectional_disable(up);
+			ret = tb_port_tmu_unidirectional_disable(down);
+			if (ret)
+				return ret;
+		}
+	} else {
+		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+	}
 
 	sw->tmu.unidirectional = false;
 	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
@@ -329,55 +390,231 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
 	return 0;
 }
 
-/**
- * tb_switch_tmu_enable() - Enable TMU on a switch
- * @sw: Switch whose TMU to enable
- *
- * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
- * all tunneling should work.
+static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *down, *up;
+
+	down = tb_port_at(tb_route(sw), parent);
+	up = tb_upstream_port(sw);
+	/*
+	 * In case of any failure in one of the steps when setting
+	 * bi-directional or uni-directional TMU mode, get back to the TMU
+	 * configurations in off mode. In case of additional failures in
+	 * the functions below, ignore them since the caller shall already
+	 * report a failure.
+	 */
+	tb_port_tmu_time_sync_disable(down);
+	tb_port_tmu_time_sync_disable(up);
+	if (unidirectional)
+		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+	else
+		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+	tb_port_tmu_unidirectional_disable(down);
+	tb_port_tmu_unidirectional_disable(up);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
 */
-int tb_switch_tmu_enable(struct tb_switch *sw)
+static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
 {
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *up, *down;
 	int ret;
 
-	if (!tb_switch_is_usb4(sw))
-		return 0;
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), parent);
 
-	if (tb_switch_tmu_is_enabled(sw))
-		return 0;
+	ret = tb_port_tmu_unidirectional_disable(up);
+	if (ret)
+		return ret;
 
-	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	ret = tb_port_tmu_unidirectional_disable(down);
+	if (ret)
+		goto out;
+
+	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+	if (ret)
+		goto out;
+
+	ret = tb_port_tmu_time_sync_enable(up);
+	if (ret)
+		goto out;
+
+	ret = tb_port_tmu_time_sync_enable(down);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	__tb_switch_tmu_off(sw, false);
+	return ret;
+}
+
+static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+{
+	u32 val;
+	int ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
 	if (ret)
 		return ret;
 
-	/* Change mode to bi-directional */
-	if (tb_route(sw) && sw->tmu.unidirectional) {
-		struct tb_switch *parent = tb_switch_parent(sw);
-		struct tb_port *up, *down;
+	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
 
-		up = tb_upstream_port(sw);
-		down = tb_port_at(tb_route(sw), parent);
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+}
 
-		ret = tb_port_tmu_unidirectional_disable(down);
-		if (ret)
-			return ret;
+static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
+{
+	struct tb_port *up = tb_upstream_port(sw);
 
-		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+	return tb_port_tmu_write(up, TMU_ADP_CS_6,
+				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
+				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	struct tb_port *up, *down;
+	int ret;
+
+	up = tb_upstream_port(sw);
+	down = tb_port_at(tb_route(sw), parent);
+	ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
+	if (ret)
+		return ret;
+
+	ret = tb_port_tmu_unidirectional_enable(up);
+	if (ret)
+		goto out;
+
+	ret = tb_port_tmu_time_sync_enable(up);
+	if (ret)
+		goto out;
+
+	ret = tb_port_tmu_unidirectional_enable(down);
+	if (ret)
+		goto out;
+
+	ret = tb_port_tmu_time_sync_enable(down);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	__tb_switch_tmu_off(sw, true);
+	return ret;
+}
+
+static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
+{
+	bool unidirectional = sw->tmu.unidirectional_request;
+	int ret;
+
+	if (unidirectional && !sw->tmu.has_ucap)
+		return -EOPNOTSUPP;
+
+	/*
+	 * No need to enable TMU on devices that don't support CLx since on
+	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
+	 * bi-directional is enabled by default.
+	 */
+	if (!tb_switch_is_clx_supported(sw))
+		return 0;
+
+	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
+		return 0;
+
+	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
+		/* Titan Ridge supports only CL0s */
+		if (!tb_switch_is_cl0s_enabled(sw))
+			return -EOPNOTSUPP;
+
+		ret = tb_switch_tmu_objection_mask(sw);
 		if (ret)
 			return ret;
 
-		ret = tb_port_tmu_unidirectional_disable(up);
+		ret = tb_switch_tmu_unidirectional_enable(sw);
 		if (ret)
 			return ret;
+	}
+
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	if (tb_route(sw)) {
+		/* The used mode changes are from OFF to HiFi-Uni/HiFi-BiDir */
+		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
+			if (unidirectional)
+				ret = __tb_switch_tmu_enable_unidirectional(sw);
+			else
+				ret = __tb_switch_tmu_enable_bidirectional(sw);
+			if (ret)
+				return ret;
+		}
+		sw->tmu.unidirectional = unidirectional;
 	} else {
+		/*
+		 * Host router port configurations are written as
+		 * part of configurations for downstream port of the parent
+		 * of the child node - see above.
+		 * Here only the host router's rate configuration is written.
+		 */
 		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
 		if (ret)
 			return ret;
 	}
 
-	sw->tmu.unidirectional = false;
 	sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
-	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
 
+	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
 	return tb_switch_tmu_set_time_disruption(sw, false);
 }
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a router
+ * @sw: Router whose TMU to enable
+ *
+ * Enables TMU of a router to be in uni-directional or bi-directional HiFi mode.
+ * Calling tb_switch_tmu_configure() is required before calling this function,
+ * to select the mode HiFi and directionality (uni-directional/bi-directional).
+ * In both modes all tunneling should work. Uni-directional mode is required for
+ * CLx (Link Low-Power) to work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+	if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
+		return -EOPNOTSUPP;
+
+	return tb_switch_tmu_hifi_enable(sw);
+}
+
+/**
+ * tb_switch_tmu_configure() - Configure the TMU rate and directionality
+ * @sw: Router whose mode to change
+ * @rate: Rate to configure Off/LowRes/HiFi
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Selects the rate of the TMU and directionality (uni-directional or
+ * bi-directional). Must be called before tb_switch_tmu_enable().
+ */
+void tb_switch_tmu_configure(struct tb_switch *sw,
+			     enum tb_switch_tmu_rate rate, bool unidirectional)
+{
+	sw->tmu.unidirectional_request = unidirectional;
+	sw->tmu.rate_request = rate;
+}
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index bb5cc480fc9a..a473cc7d9a8d 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -207,12 +207,14 @@ static int tb_pci_init_path(struct tb_path *path)
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index bb5cc480fc9a..a473cc7d9a8d 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -207,12 +207,14 @@ static int tb_pci_init_path(struct tb_path *path)
  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
  * @tb: Pointer to the domain structure
  * @down: PCIe downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
  *
  * If @down adapter is active, follows the tunnel to the PCIe upstream
  * adapter and back. Returns the discovered tunnel or %NULL if there was
  * no tunnel.
  */
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+					 bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path *path;
@@ -233,7 +235,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 	 * case.
 	 */
 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
-				&tunnel->dst_port, "PCIe Up");
+				&tunnel->dst_port, "PCIe Up", alloc_hopid);
 	if (!path) {
 		/* Just disable the downstream port */
 		tb_pci_port_enable(down, false);
@@ -244,7 +246,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 		goto err_free;
 
 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
-				"PCIe Down");
+				"PCIe Down", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
@@ -761,6 +763,7 @@ static int tb_dp_init_video_path(struct tb_path *path)
  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
  * @tb: Pointer to the domain structure
  * @in: DP in adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
  *
  * If @in adapter is active, follows the tunnel to the DP out adapter
  * and back. Returns the discovered tunnel or %NULL if there was no
@@ -768,7 +771,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
  *
  * Return: DP tunnel or %NULL if no tunnel found.
  */
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+					bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_port *port;
@@ -787,7 +791,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 	tunnel->src_port = in;
 
 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
-				&tunnel->dst_port, "Video");
+				&tunnel->dst_port, "Video", alloc_hopid);
 	if (!path) {
 		/* Just disable the DP IN port */
 		tb_dp_port_enable(in, false);
@@ -797,14 +801,15 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
 		goto err_free;
 
-	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
+				alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
 
 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
-				&port, "AUX RX");
+				&port, "AUX RX", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
@@ -1343,12 +1348,14 @@ static void tb_usb3_init_path(struct tb_path *path)
  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
  * @tb: Pointer to the domain structure
  * @down: USB3 downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
  *
  * If @down adapter is active, follows the tunnel to the USB3 upstream
  * adapter and back. Returns the discovered tunnel or %NULL if there was
  * no tunnel.
  */
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+					  bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path *path;
@@ -1369,7 +1376,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
 	 * case.
 	 */
 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
-				&tunnel->dst_port, "USB3 Down");
+				&tunnel->dst_port, "USB3 Down", alloc_hopid);
 	if (!path) {
 		/* Just disable the downstream port */
 		tb_usb3_port_enable(down, false);
@@ -1379,7 +1386,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
 
 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
-				"USB3 Up");
+				"USB3 Up", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_USB3_PATH_UP] = path;
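[Editor's note: threading alloc_hopid through the discover helpers lets a caller re-walk an already established tunnel without claiming its HopIDs a second time, which matters when tunnels are discovered again only to be torn down, e.g. after resuming from hibernate per the merge summary. An illustrative standalone sketch of the idea; the bitmap and function names below are invented for the example, not the driver's implementation.]

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HOPIDS 64

static uint64_t hopid_map;	/* bit N set => HopID N is allocated */

/* Walk a path; optionally claim each HopID it visits. */
static int discover_path(const int *hops, int n, bool alloc_hopid)
{
	for (int i = 0; i < n; i++) {
		int id = hops[i];

		if (id < 0 || id >= MAX_HOPIDS)
			return -1;
		if (alloc_hopid) {
			if (hopid_map & (1ULL << id))
				return -1;	/* already taken */
			hopid_map |= 1ULL << id;
		}
		printf("hop %d: HopID %d%s\n", i, id,
		       alloc_hopid ? " (allocated)" : "");
	}
	return 0;
}

int main(void)
{
	const int hops[] = { 8, 9, 10 };

	/* Initial discovery claims the HopIDs... */
	discover_path(hops, 3, true);
	/* ...a later re-walk (e.g. before teardown) only reads them. */
	discover_path(hops, 3, false);
	return 0;
}
```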
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index eea14e24f7e0..03e56076b5bc 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -64,10 +64,12 @@ struct tb_tunnel {
 	int allocated_down;
 };
 
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+					 bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 				      struct tb_port *down);
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+					bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 				     struct tb_port *out, int max_up,
 				     int max_down);
@@ -77,7 +79,8 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      int receive_ring);
 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
 			 int transmit_ring, int receive_path, int receive_ring);
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+					  bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 				       struct tb_port *down, int max_up,
 				       int max_down);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index ceddbe7e9f93..3a2e7126db9d 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -50,28 +50,6 @@ enum usb4_ba_index {
 #define USB4_BA_VALUE_MASK		GENMASK(31, 16)
 #define USB4_BA_VALUE_SHIFT		16
 
-static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
-				    u32 value, int timeout_msec)
-{
-	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
-
-	do {
-		u32 val;
-		int ret;
-
-		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
-		if (ret)
-			return ret;
-
-		if ((val & bit) == value)
-			return 0;
-
-		usleep_range(50, 100);
-	} while (ktime_before(ktime_get(), timeout));
-
-	return -ETIMEDOUT;
-}
-
 static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
 				 u32 *metadata, u8 *status,
 				 const void *tx_data, size_t tx_dwords,
@@ -97,7 +75,7 @@ static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
 	if (ret)
 		return ret;
 
-	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
 	if (ret)
 		return ret;
 
@@ -303,8 +281,8 @@ int usb4_switch_setup(struct tb_switch *sw)
 	if (ret)
 		return ret;
 
-	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
-					ROUTER_CS_6_CR, 50);
+	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+				      ROUTER_CS_6_CR, 50);
 }
 
 /**
@@ -480,8 +458,8 @@ int usb4_switch_set_sleep(struct tb_switch *sw)
 	if (ret)
 		return ret;
 
-	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
-					ROUTER_CS_6_SLPR, 500);
+	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+				      ROUTER_CS_6_SLPR, 500);
 }
 
 /**
@@ -1386,6 +1364,26 @@ int usb4_port_enumerate_retimers(struct tb_port *port)
 				  USB4_SB_OPCODE, &val, sizeof(val));
 }
 
+/**
+ * usb4_port_clx_supported() - Check if CLx is supported by the link
+ * @port: Port to check CLx support for
+ *
+ * The PORT_CS_18_CPS bit reflects whether the link supports CLx,
+ * including active cables (if connected on the link).
+ */
+bool usb4_port_clx_supported(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_18, 1);
+	if (ret)
+		return false;
+
+	return !!(val & PORT_CS_18_CPS);
+}
+
 static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
 				       enum usb4_sb_opcode opcode,
 				       int timeout_msec)
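[Editor's note: the deleted usb4_switch_wait_for_bit() is not gone, it moved to switch.c as tb_switch_wait_for_bit() (see the "Move usb4_switch_wait_for_bit() to switch.c" patch in this merge), so the polling pattern itself is unchanged: read the register, compare the masked bits, sleep briefly, give up at a deadline. A userspace analogue of that pattern follows; the fake register and read_reg() are stand-ins for the config-space read, not driver API.]

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static volatile uint32_t fake_reg;	/* stands in for hardware */

static int read_reg(uint32_t *val)
{
	*val = fake_reg;
	return 0;
}

/* Poll until (val & bit) == value or timeout_msec elapses. */
static int wait_for_bit(uint32_t bit, uint32_t value, int timeout_msec)
{
	struct timespec start, now, nap = { 0, 100 * 1000 };	/* ~100 us */

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		uint32_t val;
		int ret = read_reg(&val);

		if (ret)
			return ret;
		if ((val & bit) == value)
			return 0;

		nanosleep(&nap, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_msec);

	return -ETIMEDOUT;
}

int main(void)
{
	fake_reg = 0x4;
	printf("wait: %d\n", wait_for_bit(0x4, 0x4, 50));	/* 0: bit set */
	printf("wait: %d\n", wait_for_bit(0x8, 0x8, 50));	/* -ETIMEDOUT */
	return 0;
}
```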
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index eff32499610f..01d6b724ca51 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -214,16 +214,12 @@ static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
 	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
 }
 
-static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
 {
-	const struct tb_xdp_error_response *error;
-
-	if (hdr->type != ERROR_RESPONSE)
+	if (res->hdr.type != ERROR_RESPONSE)
 		return 0;
 
-	error = (const struct tb_xdp_error_response *)hdr;
-
-	switch (error->error) {
+	switch (res->error) {
 	case ERROR_UNKNOWN_PACKET:
 	case ERROR_UNKNOWN_DOMAIN:
 		return -EIO;
@@ -257,7 +253,7 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
 	if (ret)
 		return ret;
 
-	ret = tb_xdp_handle_error(&res.hdr);
+	ret = tb_xdp_handle_error(&res.err);
 	if (ret)
 		return ret;
 
@@ -329,7 +325,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
 	if (ret)
 		goto err;
 
-	ret = tb_xdp_handle_error(&res->hdr);
+	ret = tb_xdp_handle_error(&res->err);
 	if (ret)
 		goto err;
 
@@ -462,7 +458,7 @@ static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
 	if (ret)
 		return ret;
 
-	return tb_xdp_handle_error(&res.hdr);
+	return tb_xdp_handle_error(&res.err);
 }
 
 static int
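[Editor's note: the xdomain change above is the "Avoid potential stack OOB read" fix from the merge summary. The old code accepted a bare tb_xdp_header and cast it to the larger tb_xdp_error_response, so a caller that only had a header-sized object on the stack could be read past its end; taking the full response type makes the contract explicit and the call sites pass the embedded error member instead. A simplified standalone model follows; the struct names mirror the driver but the layout here is illustrative only.]

```c
#include <stdio.h>

enum { ERROR_RESPONSE = 2 };

struct xdp_header {
	int type;
};

struct xdp_error_response {
	struct xdp_header hdr;
	int error;		/* only valid when hdr.type == ERROR_RESPONSE */
};

/*
 * After the fix: the prototype demands the full response struct, so a
 * bare header can no longer be passed in and reinterpreted (the source
 * of the potential out-of-bounds read).
 */
static int handle_error(const struct xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;
	return -res->error;	/* map wire error to an errno-style code */
}

int main(void)
{
	struct xdp_error_response res = {
		.hdr = { .type = ERROR_RESPONSE },
		.error = 5,
	};

	printf("handle_error: %d\n", handle_error(&res));
	return 0;
}
```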
