author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2024-03-02 20:14:03 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2024-03-02 20:14:03 +0100
commit     73473b3033a633d640ef065aa98d60fbe2d40ddb (patch)
tree       0c5178fbeed3c512ab128b9e1d772d37f2c70952 /drivers/thunderbolt
parent     Merge v6.8-rc6 into usb-next (diff)
parent     thunderbolt: Constify the struct device_type usage (diff)
Merge tag 'thunderbolt-for-v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next
Mika writes:

thunderbolt: Changes for v6.9 merge window

This includes following USB4/Thunderbolt changes for the v6.9 merge
window:

  - Reset the topology also for USB4 v1 routers on driver load
  - DisplayPort tunneling and bandwidth allocation mode improvements
  - Tracepoint support for the control channel
  - Couple of minor fixes and cleanups.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (23 commits)
  thunderbolt: Constify the struct device_type usage
  thunderbolt: Add trace events support for the control channel
  thunderbolt: Keep the domain powered when USB4 port is in redrive mode
  thunderbolt: Improve DisplayPort tunnel setup process to be more robust
  thunderbolt: Calculate DisplayPort tunnel bandwidth after DPRX capabilities read
  thunderbolt: Reserve released DisplayPort bandwidth for a group for 10 seconds
  thunderbolt: Introduce tb_tunnel_direction_downstream()
  thunderbolt: Re-order bandwidth group functions
  thunderbolt: Fail the failed bandwidth request properly
  thunderbolt: Log an error if DPTX request is not cleared
  thunderbolt: Handle bandwidth allocation mode disable request
  thunderbolt: Re-calculate estimated bandwidth when allocation mode is enabled
  thunderbolt: Use DP_LOCAL_CAP for maximum bandwidth calculation
  thunderbolt: Correct typo in host_reset parameter
  thunderbolt: Skip discovery also in USB4 v2 host
  thunderbolt: Reset only non-USB4 host routers in resume
  thunderbolt: Remove usage of the deprecated ida_simple_xx() API
  thunderbolt: Fix rollback in tb_port_lane_bonding_enable() for lane 1
  thunderbolt: Fix XDomain rx_lanes_show and tx_lanes_show
  thunderbolt: Reset topology created by the boot firmware
  ...
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Makefile    |   1
-rw-r--r--  drivers/thunderbolt/ctl.c       |  19
-rw-r--r--  drivers/thunderbolt/ctl.h       |   4
-rw-r--r--  drivers/thunderbolt/domain.c    |  19
-rw-r--r--  drivers/thunderbolt/icm.c       |   2
-rw-r--r--  drivers/thunderbolt/lc.c        |  45
-rw-r--r--  drivers/thunderbolt/nhi.c       |  11
-rw-r--r--  drivers/thunderbolt/nvm.c       |   4
-rw-r--r--  drivers/thunderbolt/path.c      |  13
-rw-r--r--  drivers/thunderbolt/quirks.c    |  14
-rw-r--r--  drivers/thunderbolt/retimer.c   |   2
-rw-r--r--  drivers/thunderbolt/switch.c    | 140
-rw-r--r--  drivers/thunderbolt/tb.c        | 890
-rw-r--r--  drivers/thunderbolt/tb.h        |  29
-rw-r--r--  drivers/thunderbolt/tb_regs.h   |   6
-rw-r--r--  drivers/thunderbolt/trace.h     | 188
-rw-r--r--  drivers/thunderbolt/tunnel.c    |  96
-rw-r--r--  drivers/thunderbolt/tunnel.h    |   6
-rw-r--r--  drivers/thunderbolt/usb4.c      |  43
-rw-r--r--  drivers/thunderbolt/usb4_port.c |   2
-rw-r--r--  drivers/thunderbolt/xdomain.c   |  16
21 files changed, 1111 insertions, 439 deletions
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index c8b3d7b78098..b44b32dcb832 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d997a4c545f7..4bdb2d45e0bf 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
#include "ctl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
@@ -32,6 +34,7 @@
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
int timeout_msec;
event_cb callback;
void *callback_data;
+
+ int index;
};
@@ -369,6 +374,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
+
+ trace_tb_tx(ctl->index, type, data, len);
+
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
@@ -384,6 +392,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
+ trace_tb_event(ctl->index, type, pkg->buffer, size);
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
@@ -489,6 +498,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
* triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
+
+ trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
@@ -614,6 +626,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
+ * @index: Domain number
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
@@ -622,14 +635,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
+
ctl->nhi = nhi;
+ ctl->index = index;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
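
The trace_tb_tx()/trace_tb_rx()/trace_tb_event() calls added above are defined in the new drivers/thunderbolt/trace.h (188 lines, not shown in the hunks here); the local trace header is also why the Makefile now adds ccflags-y := -I$(src), so the tracepoint machinery can find it. A minimal sketch of what one such definition typically looks like — the guard names and field layout are illustrative, not the actual trace.h contents:

/* Illustrative sketch only -- the real definitions live in the trace.h added by this series */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt

#if !defined(__TB_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TB_TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(tb_tx,
	/* Matches the call site in tb_ctl_tx(): trace_tb_tx(ctl->index, type, data, len) */
	TP_PROTO(int index, enum tb_cfg_pkg_type type, const void *data, size_t size),
	TP_ARGS(index, type, data, size),
	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, type)
		__field(size_t, size)
		__dynamic_array(u32, data, size / 4)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->type = type;
		__entry->size = size;
		memcpy(__get_dynamic_array(data), data, size);
	),
	TP_printk("index=%d type=%#x size=%zu",
		  __entry->index, __entry->type, __entry->size)
);

#endif /* __TB_TRACE_EXAMPLE_H */

/* This part must stay outside the multi-read guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
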
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index eec5c953c743..bf930a191472 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9fb1a64f3300..0023017299f7 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
tb->nhi = nhi;
mutex_init(&tb->lock);
- tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
if (!tb->wq)
goto err_remove_ida;
- tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+ tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
+
+ if (tb->cm_ops->deinit)
+ tb->cm_ops->deinit(tb);
+
device_unregister(&tb->dev);
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 56790d50f9e3..baf10d099c77 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe9b0..63cb4b6afb71 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index fb4f46e51753..7af2642b97cb 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
static bool host_reset = true;
module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
-
nhi_reset(nhi);
res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
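
A note on the host_reset parameter whose description is corrected above: it is declared with permissions 0444, so it is visible but read-only under /sys/module/thunderbolt/parameters/ at runtime. Overriding the reset behaviour therefore has to happen at load time, for example (illustrative) with thunderbolt.host_reset=0 on the kernel command line when the driver is built in, or with "options thunderbolt host_reset=0" in a modprobe configuration file when built as a module.
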
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 69fb3b0fa34f..8901db2de327 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
if (!nvm)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvm_ida, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
- ida_simple_remove(&nvm_ida, nvm->id);
+ ida_free(&nvm_ida, nvm->id);
}
kfree(nvm);
}
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbdbdc..f760e54cd9bd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b40ae..e81de9c30eac 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index d49d6628dbf2..6bb49bdcd6c1 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -356,7 +356,7 @@ static void tb_retimer_release(struct device *dev)
kfree(rt);
}
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 900114ba4371..5a617ea285a7 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
- return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+ return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}
/**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->in_hopids, hopid);
+ ida_free(&port->in_hopids, hopid);
}
/**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->out_hopids, hopid);
+ ida_free(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
- goto err_lane0;
+ goto err_lane1;
}
/*
@@ -1531,29 +1538,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
+
+ return ret;
}
/**
@@ -2225,7 +2327,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
NULL)
};
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 846d2813bb1a..c5ce7a694b27 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,7 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -75,112 +76,6 @@ struct tb_hotplug_event {
bool unplug;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -472,34 +367,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -681,6 +548,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* Calculates consumed DP bandwidth at @port between path from @src_port
* to @dst_port. Does not take tunnel starting from @src_port and ending
* from @src_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used) that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +560,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +573,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +586,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Calculate what is reserved for groups crossing the
+ * same ports only once (as that is reserved for all the
+ * tunnels in the group).
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +612,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
@@ -1181,8 +1072,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1079,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1118,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,7 +1194,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
@@ -1464,6 +1352,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+ * if the graphics released bandwidth for another tunnel in the
+ * same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1670,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1605,101 +1784,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1737,49 +1821,15 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
-
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
-
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
- }
-
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
- }
-
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
-
- /*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
@@ -1839,15 +1889,19 @@ static bool tb_tunnel_one_dp(struct tb *tb)
goto err_free;
}
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
+ if (ret)
+ goto err_deactivate;
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* Transition the links to asymmetric if the consumption exceeds
* the threshold.
*/
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1859,6 +1913,8 @@ static bool tb_tunnel_one_dp(struct tb *tb)
tb_increase_tmu_accuracy(tunnel);
return true;
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
@@ -1878,13 +1934,86 @@ err_rpm_put:
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+}
+
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1903,7 +2032,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1930,6 +2062,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -2243,8 +2376,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2270,11 +2405,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2304,24 +2439,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+ * If requested bandwidth is less or equal than
+ * what is currently allocated to that tunnel we
+ * simply change the reservation of the tunnel
+ * and add the released bandwidth for the group
+ * for the next 10s. Then we release it for
+ * others to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+ * If it was not already pending,
+ * schedule release now. If it is then
+ * postpone it for the next 10s (unless
+ * it is already running in which case
+ * the 10s already expired and we should
+ * give the reserved back to others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
@@ -2332,7 +2491,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2344,11 +2503,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2356,15 +2519,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
@@ -2372,6 +2548,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+ * Write back the same allocated (so no change), this
+ * makes the DPTX request fail on graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
@@ -2379,11 +2567,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2406,32 +2594,48 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
+
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+ * There is no request active so this means the
+ * BW allocation mode was enabled from graphics
+ * side. At this point we know that the graphics
+ * driver has read the DPRX capabilities so we
+ * can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2560,6 +2764,16 @@ static void tb_stop(struct tb *tb)
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2581,9 +2795,10 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2622,12 +2837,28 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+ * reset the ports to handle it as new hotplug for USB4 v1
+ * routers (for USB4 v2 and beyond we already do host reset).
+ */
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2698,8 +2929,12 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+ * the firmware might have setup.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
@@ -2847,6 +3082,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 997c5a536905..feed8ecaf712 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -217,6 +219,11 @@ struct tb_switch {
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ * to others. This is reported as part of estimated_bw for
+ * the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ * any of the tunnels.
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +234,8 @@ struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
+ int reserved;
+ struct delayed_work release_work;
};
/**
@@ -258,6 +267,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -286,6 +296,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -452,6 +463,8 @@ struct tb_path {
* ICM to send driver ready message to the firmware.
* @start: Starts the domain
* @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ * it is unregistered. Called without @tb->lock taken. Optional.
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
@@ -483,8 +496,9 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
+ void (*deinit)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
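For reference, a sketch of how the domain core could invoke the new optional @deinit hook per the kernel-doc above (after @stop, before the domain device is unregistered, without @tb->lock held); the surrounding teardown flow is illustrative:

	mutex_lock(&tb->lock);
	tb->cm_ops->stop(tb);
	mutex_unlock(&tb->lock);

	if (tb->cm_ops->deinit)
		tb->cm_ops->deinit(tb);

	device_unregister(&tb->dev);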
@@ -735,10 +749,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -746,7 +760,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -1150,6 +1164,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1169,6 +1184,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1301,6 +1317,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2b84..4e43b47f9f11 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
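These TB_LC_PORT_MODE bits back the new tb_lc_reset_port() declared in tb.h: on routers without USB4 port config spaces the downstream port reset is toggled through the link controller rather than PORT_CS_19. A rough sketch of that sequence, assuming a hypothetical lc_port_offset() helper for the per-port LC register base (the real driver derives it from sw->cap_lc):

static int example_lc_reset_port(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	u32 offset = lc_port_offset(port);	/* hypothetical */
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset + TB_LC_PORT_MODE, 1);
	if (ret)
		return ret;

	val |= TB_LC_PORT_MODE_DPR;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset + TB_LC_PORT_MODE, 1);
	if (ret)
		return ret;

	fsleep(10000);

	val &= ~TB_LC_PORT_MODE_DPR;
	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset + TB_LC_PORT_MODE, 1);
}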
diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h
new file mode 100644
index 000000000000..4dccfcf7af6a
--- /dev/null
+++ b/drivers/thunderbolt/trace.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type) { type, #type }
+#define show_type_name(val) \
+ __print_symbolic(val, \
+ tb_cfg_type_name(TB_CFG_PKG_READ), \
+ tb_cfg_type_name(TB_CFG_PKG_WRITE), \
+ tb_cfg_type_name(TB_CFG_PKG_ERROR), \
+ tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
+ tb_cfg_type_name(TB_CFG_PKG_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
+ tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \
+ tb_cfg_type_name(TB_CFG_PKG_RESET), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+ const u32 *data)
+{
+ const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+ msg->addr.offset, msg->addr.length, msg->addr.port,
+ msg->addr.space, msg->addr.seq);
+
+ return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+ msg->port, msg->pg);
+
+ return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+ return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+ const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+ return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+ const u32 *data, u32 length)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ int i;
+
+ show_route(p, data);
+
+ switch (type) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ show_data_read_write(p, data);
+ break;
+
+ case TB_CFG_PKG_ERROR:
+ show_data_error(p, data);
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ show_data_event(p, data);
+ break;
+
+ default:
+ break;
+ }
+
+ trace_seq_printf(p, "data=[");
+ for (i = 0; i < length; i++) {
+ trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+ prefix = ", ";
+ }
+ trace_seq_printf(p, "]");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ ),
+ TP_printk("type=%s, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+ TP_ARGS(index, type, data, size, dropped),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ __field(bool, dropped)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ __entry->dropped = dropped;
+ ),
+ TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->dropped,
+ __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
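The new events are consumed like any other kernel tracepoints; once the driver is loaded they appear under events/thunderbolt/ in tracefs. On the producer side the pattern is the usual one, sketched below with illustrative call sites and arguments (the real hooks live in the control channel code):

/* in exactly one .c file of the driver */
#define CREATE_TRACE_POINTS
#include "trace.h"

	/* transmit path: domain index, packet type, raw payload, size in bytes */
	trace_tb_tx(index, type, data, size);

	/* receive path additionally records whether the packet was dropped */
	trace_tb_rx(index, type, data, size, dropped);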
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d..cb6609a56a03 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+ * reduced one). According to VESA spec, the DPRX
+		 * negotiation shall complete within 5 seconds after the tunnel
+		 * is established. We give it 100ms extra just in case.
*/
- ret = tb_dp_wait_dprx(tunnel, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index b4cff5482112..1a27ccd08b86 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+ return tb_port_path_direction_downstream(tunnel->src_port,
+ tunnel->dst_port);
+}
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8cc3e..9860b49d7a2b 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1113,6 +1113,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
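Paired with tb_lc_reset_port() from tb.h, this gives a generation-agnostic way to issue a downstream port reset; a sketch of such a dispatcher (the wrapper name is illustrative):

static int example_port_reset(struct tb_port *port)
{
	if (!port || tb_is_upstream_port(port))
		return -EINVAL;

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_reset(port);

	return tb_lc_reset_port(port);
}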
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -2819,8 +2858,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
- if (val & ADP_DP_CS_8_DR)
+ if (val & ADP_DP_CS_8_DR) {
+ tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
return -ETIMEDOUT;
+ }
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index e355bfd6343f..5150879888ca 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
kfree(usb4);
}
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9495742913d5..940ae97987ff 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
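The ida_simple_*() helpers are deprecated thin wrappers, so the conversion above is mechanical. For reference, the mapping (note that the old API's upper bound was exclusive while ida_alloc_range() takes an inclusive maximum):

	/* deprecated */
	id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
	ida_simple_remove(&xd->service_ids, svc->id);

	/* replacement: any free ID >= 0 */
	id = ida_alloc(&xd->service_ids, GFP_KERNEL);
	ida_free(&xd->service_ids, svc->id);

	/* bounded allocation, had one been needed */
	id = ida_alloc_range(&xd->service_ids, min, max, GFP_KERNEL);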
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,