Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Makefile     |    2
-rw-r--r--  drivers/thunderbolt/cap.c        |    1
-rw-r--r--  drivers/thunderbolt/ctl.c        |   47
-rw-r--r--  drivers/thunderbolt/ctl.h        |    4
-rw-r--r--  drivers/thunderbolt/domain.c     |  197
-rw-r--r--  drivers/thunderbolt/eeprom.c     |    1
-rw-r--r--  drivers/thunderbolt/icm.c        |  218
-rw-r--r--  drivers/thunderbolt/nhi.c        |  412
-rw-r--r--  drivers/thunderbolt/nhi.h        |  142
-rw-r--r--  drivers/thunderbolt/nhi_regs.h   |   12
-rw-r--r--  drivers/thunderbolt/path.c       |    1
-rw-r--r--  drivers/thunderbolt/property.c   |  670
-rw-r--r--  drivers/thunderbolt/switch.c     |    8
-rw-r--r--  drivers/thunderbolt/tb.c         |    1
-rw-r--r--  drivers/thunderbolt/tb.h         |   89
-rw-r--r--  drivers/thunderbolt/tb_msgs.h    |  168
-rw-r--r--  drivers/thunderbolt/tb_regs.h    |    1
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c |    1
-rw-r--r--  drivers/thunderbolt/tunnel_pci.h |    1
-rw-r--r--  drivers/thunderbolt/xdomain.c    | 1570
20 files changed, 3182 insertions, 364 deletions
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 4900febc6c8a..f2f0de27252b 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 38bc27a5ce4f..c2277b8ee88d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - capabilities lookup
*
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index fb40dd0588b9..37a7f4c735d0 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
@@ -289,20 +290,6 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
}
}
-static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
-{
- int i;
- for (i = 0; i < len; i++)
- dst[i] = cpu_to_be32(src[i]);
-}
-
-static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
-{
- int i;
- for (i = 0; i < len; i++)
- dst[i] = be32_to_cpu(src[i]);
-}
-
static __be32 tb_crc(const void *data, size_t len)
{
return cpu_to_be32(~__crc32c_le(~0, data, len));
@@ -373,7 +360,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
- res = ring_tx(ctl->tx, &pkg->frame);
+ res = tb_ring_tx(ctl->tx, &pkg->frame);
if (res) /* ring is stopped */
tb_ctl_pkg_free(pkg);
return res;
@@ -382,15 +369,15 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
/**
* tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
*/
-static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
- ctl->callback(ctl->callback_data, type, pkg->buffer, size);
+ return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
- ring_rx(pkg->ctl->rx, &pkg->frame); /*
+ tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
* We ignore failures during stop.
* All rx packets are referenced
* from ctl->rx_packets, so we do
@@ -458,6 +445,8 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
break;
case TB_CFG_PKG_EVENT:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ case TB_CFG_PKG_XDOMAIN_REQ:
if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl,
"RX: checksum mismatch, dropping packet\n");
@@ -465,8 +454,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
}
/* Fall through */
case TB_CFG_PKG_ICM_EVENT:
- tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
- goto rx;
+ if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
+ goto rx;
+ break;
default:
break;
@@ -625,11 +615,12 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
if (!ctl->frame_pool)
goto err;
- ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
if (!ctl->tx)
goto err;
- ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
+ 0xffff, NULL, NULL);
if (!ctl->rx)
goto err;
@@ -662,9 +653,9 @@ void tb_ctl_free(struct tb_ctl *ctl)
return;
if (ctl->rx)
- ring_free(ctl->rx);
+ tb_ring_free(ctl->rx);
if (ctl->tx)
- ring_free(ctl->tx);
+ tb_ring_free(ctl->tx);
/* free RX packets */
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -683,8 +674,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
tb_ctl_info(ctl, "control channel starting...\n");
- ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
- ring_start(ctl->rx);
+ tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
+ tb_ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_rx_submit(ctl->rx_packets[i]);
@@ -705,8 +696,8 @@ void tb_ctl_stop(struct tb_ctl *ctl)
ctl->running = false;
mutex_unlock(&ctl->request_queue_lock);
- ring_stop(ctl->rx);
- ring_stop(ctl->tx);
+ tb_ring_stop(ctl->rx);
+ tb_ring_stop(ctl->tx);
if (!list_empty(&ctl->request_queue))
tb_ctl_WARN(ctl, "dangling request in request_queue\n");
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 36fd28b1c1c5..3062e0b5f71e 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
@@ -8,6 +9,7 @@
#define _TB_CFG
#include <linux/kref.h>
+#include <linux/thunderbolt.h>
#include "nhi.h"
#include "tb_msgs.h"
@@ -15,7 +17,7 @@
/* control channel */
struct tb_ctl;
-typedef void (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9f2dcd48974d..9b90115319ce 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -20,6 +20,98 @@
static DEFINE_IDA(tb_domain_ida);
+static bool match_service_id(const struct tb_service_id *id,
+ const struct tb_service *svc)
+{
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
+ if (strcmp(id->protocol_key, svc->key))
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
+ if (id->protocol_id != svc->prtcid)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->protocol_version != svc->prtcvers)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->protocol_revision != svc->prtcrevs)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct tb_service_id *__tb_service_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct tb_service_driver *driver;
+ const struct tb_service_id *ids;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return NULL;
+
+ driver = container_of(drv, struct tb_service_driver, driver);
+ if (!driver->id_table)
+ return NULL;
+
+ for (ids = driver->id_table; ids->match_flags != 0; ids++) {
+ if (match_service_id(ids, svc))
+ return ids;
+ }
+
+ return NULL;
+}
+
+static int tb_service_match(struct device *dev, struct device_driver *drv)
+{
+ return !!__tb_service_match(dev, drv);
+}
+
+static int tb_service_probe(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+ const struct tb_service_id *id;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ id = __tb_service_match(dev, &driver->driver);
+
+ return driver->probe(svc, id);
+}
+
+static int tb_service_remove(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->remove)
+ driver->remove(svc);
+
+ return 0;
+}
+
+static void tb_service_shutdown(struct device *dev)
+{
+ struct tb_service_driver *driver;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc || !dev->driver)
+ return;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->shutdown)
+ driver->shutdown(svc);
+}
+
static const char * const tb_security_names[] = {
[TB_SECURITY_NONE] = "none",
[TB_SECURITY_USER] = "user",
@@ -52,6 +144,10 @@ static const struct attribute_group *domain_attr_groups[] = {
struct bus_type tb_bus_type = {
.name = "thunderbolt",
+ .match = tb_service_match,
+ .probe = tb_service_probe,
+ .remove = tb_service_remove,
+ .shutdown = tb_service_shutdown,
};
static void tb_domain_release(struct device *dev)
@@ -128,17 +224,26 @@ err_free:
return NULL;
}
-static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct tb *tb = data;
if (!tb->cm_ops->handle_event) {
tb_warn(tb, "domain does not have event handler\n");
- return;
+ return true;
}
- tb->cm_ops->handle_event(tb, type, buf, size);
+ switch (type) {
+ case TB_CFG_PKG_XDOMAIN_REQ:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ return tb_xdomain_handle_request(tb, type, buf, size);
+
+ default:
+ tb->cm_ops->handle_event(tb, type, buf, size);
+ }
+
+ return true;
}
/**
@@ -443,9 +548,92 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
return tb->cm_ops->disconnect_pcie_paths(tb);
}
+/**
+ * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
+ * @tb: Domain enabling the DMA paths
+ * @xd: XDomain DMA paths are created to
+ *
+ * Calls connection manager specific method to enable DMA paths to the
+ * XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->approve_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->approve_xdomain_paths(tb, xd);
+}
+
+/**
+ * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
+ * @tb: Domain disabling the DMA paths
+ * @xd: XDomain whose DMA paths are disconnected
+ *
+ * Calls connection manager specific method to disconnect DMA paths to
+ * the XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->disconnect_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+}
+
+static int disconnect_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+ struct tb *tb = data;
+ int ret = 0;
+
+ xd = tb_to_xdomain(dev);
+ if (xd && xd->tb == tb)
+ ret = tb_xdomain_disable_paths(xd);
+
+ return ret;
+}
+
+/**
+ * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
+ * @tb: Domain whose paths are disconnected
+ *
+ * This function can be used to disconnect all paths (PCIe, XDomain) for
+ * example in preparation for host NVM firmware upgrade. After this is
+ * called the paths cannot be established without resetting the switch.
+ *
+ * Return: %0 in case of success and negative errno otherwise.
+ */
+int tb_domain_disconnect_all_paths(struct tb *tb)
+{
+ int ret;
+
+ ret = tb_domain_disconnect_pcie_paths(tb);
+ if (ret)
+ return ret;
+
+ return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
+}
+
int tb_domain_init(void)
{
- return bus_register(&tb_bus_type);
+ int ret;
+
+ ret = tb_xdomain_init();
+ if (ret)
+ return ret;
+ ret = bus_register(&tb_bus_type);
+ if (ret)
+ tb_xdomain_exit();
+
+ return ret;
}
void tb_domain_exit(void)
@@ -453,4 +641,5 @@ void tb_domain_exit(void)
bus_unregister(&tb_bus_type);
ida_destroy(&tb_domain_ida);
tb_switch_exit();
+ tb_xdomain_exit();
}
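With match/probe/remove/shutdown wired into tb_bus_type above, XDomain services bind to drivers through the regular driver model. The sketch below shows what a minimal client service driver could look like, matched by tb_service_match() via its id_table; the registration helpers tb_register_service_driver()/tb_unregister_service_driver() and the struct tb_service_driver/tb_service_id definitions are assumed to come from the <linux/thunderbolt.h> parts of this series and are not visible in this hunk.

/* Hypothetical client of the new Thunderbolt service bus. */
#include <linux/module.h>
#include <linux/thunderbolt.h>

static int example_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	/* @id points at the example_ids[] entry that matched */
	dev_info(&svc->dev, "bound to service with key %s\n", svc->key);
	return 0;
}

static void example_remove(struct tb_service *svc)
{
	dev_info(&svc->dev, "service going away\n");
}

static const struct tb_service_id example_ids[] = {
	{
		.match_flags = TBSVC_MATCH_PROTOCOL_KEY | TBSVC_MATCH_PROTOCOL_ID,
		.protocol_key = "example",
		.protocol_id = 1,
	},
	{ },
};

static struct tb_service_driver example_driver = {
	.driver.name = "example-service",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};

module_driver(example_driver, tb_register_service_driver,
	      tb_unregister_service_driver);
MODULE_LICENSE("GPL v2");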
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index fe2f00ceafc5..3e8caf22c294 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - eeprom access
*
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 53250fc057e1..ab02d13f40b7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -60,6 +60,8 @@
* @get_route: Find a route string for given switch
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
*/
struct icm {
struct mutex request_lock;
@@ -74,6 +76,10 @@ struct icm {
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
+ void (*xdomain_connected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*xdomain_disconnected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
};
struct icm_notification {
@@ -89,7 +95,10 @@ static inline struct tb *icm_to_tb(struct icm *icm)
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
- return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
+ u8 link;
+
+ link = depth ? route >> ((depth - 1) * 8) : route;
+ return tb_phy_port_from_link(link);
}
static inline u8 dual_link_from_link(u8 link)
@@ -320,6 +329,51 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
return 0;
}
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct icm_fr_pkg_approve_xdomain_response reply;
+ struct icm_fr_pkg_approve_xdomain request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.hdr.code = ICM_APPROVE_XDOMAIN;
+ request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
+ memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+ request.transmit_path = xd->transmit_path;
+ request.transmit_ring = xd->transmit_ring;
+ request.receive_path = xd->receive_path;
+ request.receive_ring = xd->receive_ring;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ u8 phy_port;
+ u8 cmd;
+
+ phy_port = tb_phy_port_from_link(xd->link);
+ if (phy_port == 0)
+ cmd = NHI_MAILBOX_DISCONNECT_PA;
+ else
+ cmd = NHI_MAILBOX_DISCONNECT_PB;
+
+ nhi_mailbox_cmd(tb->nhi, cmd, 1);
+ usleep_range(10, 50);
+ nhi_mailbox_cmd(tb->nhi, cmd, 2);
+ return 0;
+}
+
static void remove_switch(struct tb_switch *sw)
{
struct tb_switch *parent_sw;
@@ -475,6 +529,141 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb_switch_put(sw);
}
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ tb_port_at(xd->route, sw)->xdomain = NULL;
+ tb_xdomain_remove(xd);
+}
+
+static void
+icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_connected *pkg =
+ (const struct icm_fr_event_xdomain_connected *)hdr;
+ struct tb_xdomain *xd;
+ struct tb_switch *sw;
+ u8 link, depth;
+ bool approved;
+ u64 route;
+
+ /*
+ * After NVM upgrade adding root switch device fails because we
+ * initiated reset. During that time ICM might still send
+ * XDomain connected message which we ignore here.
+ */
+ if (!tb->root_switch)
+ return;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+ approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+ if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+ tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+ return;
+ }
+
+ route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ u8 xd_phy_port, phy_port;
+
+ xd_phy_port = phy_port_from_route(xd->route, xd->depth);
+ phy_port = phy_port_from_route(route, depth);
+
+ if (xd->depth == depth && xd_phy_port == phy_port) {
+ xd->link = link;
+ xd->route = route;
+ xd->is_unplugged = false;
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ /*
+ * If we find an existing XDomain connection remove it
+ * now. We need to go through login handshake and
+ * everything anyway to be able to re-establish the
+ * connection.
+ */
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+	 * Check if an XDomain already exists in the same place as
+	 * the new one and in that case remove it because it is
+ * most likely another host that got disconnected.
+ */
+ xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+ if (!xd) {
+ u8 dual_link;
+
+ dual_link = dual_link_from_link(link);
+ if (dual_link)
+ xd = tb_xdomain_find_by_link_depth(tb, dual_link,
+ depth);
+ }
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * If the user disconnected a switch during suspend and
+ * connected another host to the same port, remove the switch
+ * first.
+ */
+ sw = get_switch_at_route(tb->root_switch, route);
+ if (sw)
+ remove_switch(sw);
+
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+ depth);
+ return;
+ }
+
+ xd = tb_xdomain_alloc(sw->tb, &sw->dev, route,
+ &pkg->local_uuid, &pkg->remote_uuid);
+ if (!xd) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ xd->link = link;
+ xd->depth = depth;
+
+ tb_port_at(route, sw)->xdomain = xd;
+
+ tb_xdomain_add(xd);
+ tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_disconnected *pkg =
+ (const struct icm_fr_event_xdomain_disconnected *)hdr;
+ struct tb_xdomain *xd;
+
+ /*
+ * If the connection is through one or multiple devices, the
+ * XDomain device is removed along with them so it is fine if we
+ * cannot find it here.
+ */
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+}
+
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
struct pci_dev *parent;
@@ -594,6 +783,12 @@ static void icm_handle_notification(struct work_struct *work)
case ICM_EVENT_DEVICE_DISCONNECTED:
icm->device_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
}
mutex_unlock(&tb->lock);
@@ -927,6 +1122,10 @@ static void icm_unplug_children(struct tb_switch *sw)
if (tb_is_upstream_port(port))
continue;
+ if (port->xdomain) {
+ port->xdomain->is_unplugged = true;
+ continue;
+ }
if (!port->remote)
continue;
@@ -943,6 +1142,13 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
if (tb_is_upstream_port(port))
continue;
+
+ if (port->xdomain && port->xdomain->is_unplugged) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ continue;
+ }
+
if (!port->remote)
continue;
@@ -1009,8 +1215,10 @@ static int icm_start(struct tb *tb)
tb->root_switch->no_nvm_upgrade = x86_apple_machine;
ret = tb_switch_add(tb->root_switch);
- if (ret)
+ if (ret) {
tb_switch_put(tb->root_switch);
+ tb->root_switch = NULL;
+ }
return ret;
}
@@ -1042,6 +1250,8 @@ static const struct tb_cm_ops icm_fr_ops = {
.add_switch_key = icm_fr_add_switch_key,
.challenge_switch_key = icm_fr_challenge_switch_key,
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+ .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
struct tb *icm_probe(struct tb_nhi *nhi)
@@ -1064,6 +1274,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->get_route = icm_fr_get_route;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_fr_ops;
break;
@@ -1077,6 +1289,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->get_route = icm_ar_get_route;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_fr_ops;
break;
}
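As a small worked example of the link_info word used by the new XDomain messages: icm_fr_approve_xdomain_paths() packs the depth above ICM_LINK_INFO_DEPTH_SHIFT with the link number in the low bits, and icm_fr_xdomain_connected() reverses that with the LINK/DEPTH masks plus the APPROVED bit. The helpers below are illustrative only, not part of the patch.

/* Illustrative only; mirrors the packing/unpacking used above. */
static u32 example_pack_link_info(u8 link, u8 depth)
{
	return depth << ICM_LINK_INFO_DEPTH_SHIFT | link;
}

static void example_unpack_link_info(u32 link_info, u8 *link, u8 *depth,
				     bool *approved)
{
	*link = link_info & ICM_LINK_INFO_LINK_MASK;
	*depth = (link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		 ICM_LINK_INFO_DEPTH_SHIFT;
	*approved = !!(link_info & ICM_LINK_INFO_APPROVED);
}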
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 05af126a2435..419a7a90bce0 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -22,6 +22,14 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
/*
+ * Used to enable end-to-end workaround for missing RX packets. Do not
+ * use this ring for anything else.
+ */
+#define RING_E2E_UNUSED_HOPID 2
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define RING_FIRST_USABLE_HOPID 8
+
+/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
*/
@@ -206,8 +214,10 @@ static void ring_work(struct work_struct *work)
struct tb_ring *ring = container_of(work, typeof(*ring), work);
struct ring_frame *frame;
bool canceled = false;
+ unsigned long flags;
LIST_HEAD(done);
- mutex_lock(&ring->lock);
+
+ spin_lock_irqsave(&ring->lock, flags);
if (!ring->running) {
/* Move all frames to done and mark them as canceled. */
@@ -229,30 +239,14 @@ static void ring_work(struct work_struct *work)
frame->eof = ring->descriptors[ring->tail].eof;
frame->sof = ring->descriptors[ring->tail].sof;
frame->flags = ring->descriptors[ring->tail].flags;
- if (frame->sof != 0)
- dev_WARN(&ring->nhi->pdev->dev,
- "%s %d got unexpected SOF: %#x\n",
- RING_TYPE(ring), ring->hop,
- frame->sof);
- /*
- * known flags:
- * raw not enabled, interupt not set: 0x2=0010
- * raw enabled: 0xa=1010
- * raw not enabled: 0xb=1011
- * partial frame (>MAX_FRAME_SIZE): 0xe=1110
- */
- if (frame->flags != 0xa)
- dev_WARN(&ring->nhi->pdev->dev,
- "%s %d got unexpected flags: %#x\n",
- RING_TYPE(ring), ring->hop,
- frame->flags);
}
ring->tail = (ring->tail + 1) % ring->size;
}
ring_write_descriptors(ring);
invoke_callback:
- mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
+ /* allow callbacks to schedule new work */
+ spin_unlock_irqrestore(&ring->lock, flags);
while (!list_empty(&done)) {
frame = list_first_entry(&done, typeof(*frame), list);
/*
@@ -260,29 +254,128 @@ invoke_callback:
* Do not hold on to it.
*/
list_del_init(&frame->list);
- frame->callback(ring, frame, canceled);
+ if (frame->callback)
+ frame->callback(ring, frame, canceled);
}
}
-int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
+int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
+ unsigned long flags;
int ret = 0;
- mutex_lock(&ring->lock);
+
+ spin_lock_irqsave(&ring->lock, flags);
if (ring->running) {
list_add_tail(&frame->list, &ring->queue);
ring_write_descriptors(ring);
} else {
ret = -ESHUTDOWN;
}
- mutex_unlock(&ring->lock);
+ spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
+
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called when @start_poll callback of the @ring
+ * has been called. It will read one completed frame from the ring and
+ * return it to the caller. Returns %NULL if there is no more completed
+ * frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+ struct ring_frame *frame = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (!ring->running)
+ goto unlock;
+ if (ring_empty(ring))
+ goto unlock;
+
+ if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+ frame = list_first_entry(&ring->in_flight, typeof(*frame),
+ list);
+ list_del_init(&frame->list);
+
+ if (!ring->is_tx) {
+ frame->size = ring->descriptors[ring->tail].length;
+ frame->eof = ring->descriptors[ring->tail].eof;
+ frame->sof = ring->descriptors[ring->tail].sof;
+ frame->flags = ring->descriptors[ring->tail].flags;
+ }
+
+ ring->tail = (ring->tail + 1) % ring->size;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+ int idx = ring_interrupt_index(ring);
+ int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+ int bit = idx % 32;
+ u32 val;
+
+ val = ioread32(ring->nhi->iobase + reg);
+ if (mask)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+ if (!ring->running)
+ return;
+
+ if (ring->start_poll) {
+ __ring_interrupt_mask(ring, false);
+ ring->start_poll(ring->poll_data);
+ } else {
+ schedule_work(&ring->work);
+ }
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->nhi->lock, flags);
+ spin_lock(&ring->lock);
+ if (ring->start_poll)
+ __ring_interrupt_mask(ring, false);
+ spin_unlock(&ring->lock);
+ spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static irqreturn_t ring_msix(int irq, void *data)
{
struct tb_ring *ring = data;
- schedule_work(&ring->work);
+ spin_lock(&ring->nhi->lock);
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
+ spin_unlock(&ring->nhi->lock);
+
return IRQ_HANDLED;
}
@@ -320,30 +413,81 @@ static void ring_release_msix(struct tb_ring *ring)
ring->irq = 0;
}
-static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
- bool transmit, unsigned int flags)
+static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
+{
+ int ret = 0;
+
+ spin_lock_irq(&nhi->lock);
+
+ if (ring->hop < 0) {
+ unsigned int i;
+
+ /*
+ * Automatically allocate HopID from the non-reserved
+ * range 8 .. hop_count - 1.
+ */
+ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ if (ring->is_tx) {
+ if (!nhi->tx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ } else {
+ if (!nhi->rx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ if (ring->is_tx && nhi->tx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ if (ring->is_tx)
+ nhi->tx_rings[ring->hop] = ring;
+ else
+ nhi->rx_rings[ring->hop] = ring;
+
+err_unlock:
+ spin_unlock_irq(&nhi->lock);
+
+ return ret;
+}
+
+static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+ bool transmit, unsigned int flags,
+ u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *),
+ void *poll_data)
{
struct tb_ring *ring = NULL;
dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
transmit ? "TX" : "RX", hop, size);
- mutex_lock(&nhi->lock);
- if (hop >= nhi->hop_count) {
- dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
- goto err;
- }
- if (transmit && nhi->tx_rings[hop]) {
- dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
- goto err;
- } else if (!transmit && nhi->rx_rings[hop]) {
- dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
- goto err;
- }
+ /* Tx Ring 2 is reserved for E2E workaround */
+ if (transmit && hop == RING_E2E_UNUSED_HOPID)
+ return NULL;
+
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err;
+ return NULL;
- mutex_init(&ring->lock);
+ spin_lock_init(&ring->lock);
INIT_LIST_HEAD(&ring->queue);
INIT_LIST_HEAD(&ring->in_flight);
INIT_WORK(&ring->work, ring_work);
@@ -353,55 +497,88 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->is_tx = transmit;
ring->size = size;
ring->flags = flags;
+ ring->sof_mask = sof_mask;
+ ring->eof_mask = eof_mask;
ring->head = 0;
ring->tail = 0;
ring->running = false;
-
- if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
- goto err;
+ ring->start_poll = start_poll;
+ ring->poll_data = poll_data;
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
if (!ring->descriptors)
- goto err;
+ goto err_free_ring;
+
+ if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+ goto err_free_descs;
+
+ if (nhi_alloc_hop(nhi, ring))
+ goto err_release_msix;
- if (transmit)
- nhi->tx_rings[hop] = ring;
- else
- nhi->rx_rings[hop] = ring;
- mutex_unlock(&nhi->lock);
return ring;
-err:
- if (ring)
- mutex_destroy(&ring->lock);
+err_release_msix:
+ ring_release_msix(ring);
+err_free_descs:
+ dma_free_coherent(&ring->nhi->pdev->dev,
+ ring->size * sizeof(*ring->descriptors),
+ ring->descriptors, ring->descriptors_dma);
+err_free_ring:
kfree(ring);
- mutex_unlock(&nhi->lock);
+
return NULL;
}
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags)
+/**
+ * tb_ring_alloc_tx() - Allocate DMA ring for transmit
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ */
+struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, true, flags);
+ return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags)
+/**
+ * tb_ring_alloc_rx() - Allocate DMA ring for receive
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ * @sof_mask: Mask of PDF values that start a frame
+ * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ *		interrupt is triggered and masked, instead of invoking the
+ *		callback of each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
+ */
+struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags, u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *), void *poll_data)
{
- return ring_alloc(nhi, hop, size, false, flags);
+ return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+ start_poll, poll_data);
}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
/**
- * ring_start() - enable a ring
+ * tb_ring_start() - enable a ring
*
- * Must not be invoked in parallel with ring_stop().
+ * Must not be invoked in parallel with tb_ring_stop().
*/
-void ring_start(struct tb_ring *ring)
+void tb_ring_start(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
- mutex_lock(&ring->lock);
+ u16 frame_size;
+ u32 flags;
+
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
if (ring->nhi->going_away)
goto err;
if (ring->running) {
@@ -411,43 +588,65 @@ void ring_start(struct tb_ring *ring)
dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
RING_TYPE(ring), ring->hop);
+ if (ring->flags & RING_FLAG_FRAME) {
+ /* Means 4096 */
+ frame_size = 0;
+ flags = RING_FLAG_ENABLE;
+ } else {
+ frame_size = TB_FRAME_SIZE;
+ flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
+ }
+
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ u32 hop;
+
+ /*
+ * In order not to lose Rx packets we enable end-to-end
+ * workaround which transfers Rx credits to an unused Tx
+ * HopID.
+ */
+ hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
+ hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
+ flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
+ }
+
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
ring_iowrite32options(ring, 0, 4); /* time releated ? */
- ring_iowrite32options(ring,
- RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+ ring_iowrite32options(ring, flags, 0);
} else {
- ring_iowrite32desc(ring,
- (TB_FRAME_SIZE << 16) | ring->size, 12);
- ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
- ring_iowrite32options(ring,
- RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+ u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
+
+ ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
+ ring_iowrite32options(ring, sof_eof_mask, 4);
+ ring_iowrite32options(ring, flags, 0);
}
ring_interrupt_active(ring, true);
ring->running = true;
err:
- mutex_unlock(&ring->lock);
- mutex_unlock(&ring->nhi->lock);
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
}
-
+EXPORT_SYMBOL_GPL(tb_ring_start);
/**
- * ring_stop() - shutdown a ring
+ * tb_ring_stop() - shutdown a ring
*
* Must not be invoked from a callback.
*
- * This method will disable the ring. Further calls to ring_tx/ring_rx will
- * return -ESHUTDOWN until ring_stop has been called.
+ * This method will disable the ring. Further calls to
+ * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been
+ * called.
*
* All enqueued frames will be canceled and their callbacks will be executed
* with frame->canceled set to true (on the callback thread). This method
* returns only after all callback invocations have finished.
*/
-void ring_stop(struct tb_ring *ring)
+void tb_ring_stop(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
- mutex_lock(&ring->lock);
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
if (ring->nhi->going_away)
@@ -468,8 +667,8 @@ void ring_stop(struct tb_ring *ring)
ring->running = false;
err:
- mutex_unlock(&ring->lock);
- mutex_unlock(&ring->nhi->lock);
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
/*
* schedule ring->work to invoke callbacks on all remaining frames.
@@ -477,9 +676,10 @@ err:
schedule_work(&ring->work);
flush_work(&ring->work);
}
+EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
- * ring_free() - free ring
+ * tb_ring_free() - free ring
*
* When this method returns all invocations of ring->callback will have
* finished.
@@ -488,9 +688,9 @@ err:
*
* Must NOT be called from ring_frame->callback!
*/
-void ring_free(struct tb_ring *ring)
+void tb_ring_free(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
+ spin_lock_irq(&ring->nhi->lock);
/*
* Dissociate the ring from the NHI. This also ensures that
* nhi_interrupt_work cannot reschedule ring->work.
@@ -504,6 +704,7 @@ void ring_free(struct tb_ring *ring)
dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
RING_TYPE(ring), ring->hop);
}
+ spin_unlock_irq(&ring->nhi->lock);
ring_release_msix(ring);
@@ -520,16 +721,15 @@ void ring_free(struct tb_ring *ring)
RING_TYPE(ring),
ring->hop);
- mutex_unlock(&ring->nhi->lock);
/**
* ring->work can no longer be scheduled (it is scheduled only
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
* to finish before freeing the ring.
*/
flush_work(&ring->work);
- mutex_destroy(&ring->lock);
kfree(ring);
}
+EXPORT_SYMBOL_GPL(tb_ring_free);
/**
* nhi_mailbox_cmd() - Send a command through NHI mailbox
@@ -595,7 +795,7 @@ static void nhi_interrupt_work(struct work_struct *work)
int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
struct tb_ring *ring;
- mutex_lock(&nhi->lock);
+ spin_lock_irq(&nhi->lock);
/*
* Starting at REG_RING_NOTIFY_BASE there are three status bitfields
@@ -630,10 +830,12 @@ static void nhi_interrupt_work(struct work_struct *work)
hop);
continue;
}
- /* we do not check ring->running, this is done in ring->work */
- schedule_work(&ring->work);
+
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
}
- mutex_unlock(&nhi->lock);
+ spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
@@ -651,6 +853,22 @@ static int nhi_suspend_noirq(struct device *dev)
return tb_domain_suspend_noirq(tb);
}
+static void nhi_enable_int_throttling(struct tb_nhi *nhi)
+{
+ /* Throttling is specified in 256ns increments */
+ u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
+ unsigned int i;
+
+ /*
+ * Configure interrupt throttling for all vectors even if we
+ * only use few.
+ */
+ for (i = 0; i < MSIX_MAX_VECS; i++) {
+ u32 reg = REG_INT_THROTTLING_RATE + i * 4;
+ iowrite32(throttle, nhi->iobase + reg);
+ }
+}
+
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -663,6 +881,8 @@ static int nhi_resume_noirq(struct device *dev)
*/
if (!pci_device_is_present(pdev))
tb->nhi->going_away = true;
+ else
+ nhi_enable_int_throttling(tb->nhi);
return tb_domain_resume_noirq(tb);
}
@@ -705,7 +925,6 @@ static void nhi_shutdown(struct tb_nhi *nhi)
devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
flush_work(&nhi->interrupt_work);
}
- mutex_destroy(&nhi->lock);
ida_destroy(&nhi->msix_ida);
}
@@ -717,6 +936,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
/* In case someone left them on. */
nhi_disable_interrupts(nhi);
+ nhi_enable_int_throttling(nhi);
+
ida_init(&nhi->msix_ida);
/*
@@ -792,13 +1013,10 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
- mutex_init(&nhi->lock);
+ spin_lock_init(&nhi->lock);
pci_set_master(pdev);
- /* magic value - clock related? */
- iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
-
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
@@ -926,5 +1144,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-module_init(nhi_init);
+fs_initcall(nhi_init);
module_exit(nhi_unload);
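The ring API exported above (tb_ring_alloc_rx/tx(), tb_ring_start/stop/free(), tb_ring_poll(), tb_ring_poll_complete()) lets code outside the core driver, such as the XDomain service drivers added later in this series, drive NHI DMA rings directly. Below is a hedged sketch of the new polled Rx mode; buffer allocation, DMA mapping and the tb_ring_rx() enqueue wrapper (now expected to live in <linux/thunderbolt.h>) are assumed and omitted.

/* Hypothetical consumer of the exported ring API using polled Rx. */
static void example_start_poll(void *data)
{
	/*
	 * Called from the ring interrupt; the interrupt stays masked
	 * until tb_ring_poll_complete() is called, so just kick the
	 * work item passed in as poll_data.
	 */
	schedule_work(data);
}

static void example_drain(struct tb_ring *ring)
{
	struct ring_frame *frame;

	/* Pull out everything the NHI has completed so far */
	while ((frame = tb_ring_poll(ring)) != NULL)
		pr_info("Rx frame: %u bytes, eof %#x, flags %#x\n",
			frame->size, frame->eof, frame->flags);

	/* Done polling for now, unmask the ring interrupt again */
	tb_ring_poll_complete(ring);
}

static struct tb_ring *example_alloc_ring(struct tb_nhi *nhi,
					  struct work_struct *work)
{
	struct tb_ring *ring;

	/* HopID -1 lets nhi_alloc_hop() pick a free HopID >= 8 */
	ring = tb_ring_alloc_rx(nhi, -1, 256, 0, 0xffff, 0xffff,
				example_start_poll, work);
	if (ring)
		tb_ring_start(ring);
	return ring;
}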
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 5b5bb2c436be..4476ab4cfd0c 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - NHI driver
*
@@ -7,144 +8,7 @@
#ifndef DSL3510_H_
#define DSL3510_H_
-#include <linux/idr.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-/**
- * struct tb_nhi - thunderbolt native host interface
- * @lock: Must be held during ring creation/destruction. Is acquired by
- * interrupt_work when dispatching interrupts to individual rings.
- * @pdev: Pointer to the PCI device
- * @iobase: MMIO space of the NHI
- * @tx_rings: All Tx rings available on this host controller
- * @rx_rings: All Rx rings available on this host controller
- * @msix_ida: Used to allocate MSI-X vectors for rings
- * @going_away: The host controller device is about to disappear so when
- * this flag is set, avoid touching the hardware anymore.
- * @interrupt_work: Work scheduled to handle ring interrupt when no
- * MSI-X is used.
- * @hop_count: Number of rings (end point hops) supported by NHI.
- */
-struct tb_nhi {
- struct mutex lock;
- struct pci_dev *pdev;
- void __iomem *iobase;
- struct tb_ring **tx_rings;
- struct tb_ring **rx_rings;
- struct ida msix_ida;
- bool going_away;
- struct work_struct interrupt_work;
- u32 hop_count;
-};
-
-/**
- * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
- * @lock: Lock serializing actions to this ring. Must be acquired after
- * nhi->lock.
- * @nhi: Pointer to the native host controller interface
- * @size: Size of the ring
- * @hop: Hop (DMA channel) associated with this ring
- * @head: Head of the ring (write next descriptor here)
- * @tail: Tail of the ring (complete next descriptor here)
- * @descriptors: Allocated descriptors for this ring
- * @queue: Queue holding frames to be transferred over this ring
- * @in_flight: Queue holding frames that are currently in flight
- * @work: Interrupt work structure
- * @is_tx: Is the ring Tx or Rx
- * @running: Is the ring running
- * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
- * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
- * @flags: Ring specific flags
- */
-struct tb_ring {
- struct mutex lock;
- struct tb_nhi *nhi;
- int size;
- int hop;
- int head;
- int tail;
- struct ring_desc *descriptors;
- dma_addr_t descriptors_dma;
- struct list_head queue;
- struct list_head in_flight;
- struct work_struct work;
- bool is_tx:1;
- bool running:1;
- int irq;
- u8 vector;
- unsigned int flags;
-};
-
-/* Leave ring interrupt enabled on suspend */
-#define RING_FLAG_NO_SUSPEND BIT(0)
-
-struct ring_frame;
-typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
-
-/**
- * struct ring_frame - for use with ring_rx/ring_tx
- */
-struct ring_frame {
- dma_addr_t buffer_phy;
- ring_cb callback;
- struct list_head list;
- u32 size:12; /* TX: in, RX: out*/
- u32 flags:12; /* RX: out */
- u32 eof:4; /* TX:in, RX: out */
- u32 sof:4; /* TX:in, RX: out */
-};
-
-#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
-
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags);
-void ring_start(struct tb_ring *ring);
-void ring_stop(struct tb_ring *ring);
-void ring_free(struct tb_ring *ring);
-
-int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
-
-/**
- * ring_rx() - enqueue a frame on an RX ring
- *
- * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
- * buffer must contain at least TB_FRAME_SIZE bytes.
- *
- * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
- * frame->sof set once the frame has been received.
- *
- * If ring_stop is called after the packet has been enqueued frame->callback
- * will be called with canceled set to true.
- *
- * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
- */
-static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
-{
- WARN_ON(ring->is_tx);
- return __ring_enqueue(ring, frame);
-}
-
-/**
- * ring_tx() - enqueue a frame on an TX ring
- *
- * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
- * and frame->sof have to be set.
- *
- * frame->callback will be invoked with once the frame has been transmitted.
- *
- * If ring_stop is called after the packet has been enqueued frame->callback
- * will be called with canceled set to true.
- *
- * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
- */
-static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
-{
- WARN_ON(!ring->is_tx);
- return __ring_enqueue(ring, frame);
-}
+#include <linux/thunderbolt.h>
enum nhi_fw_mode {
NHI_FW_SAFE_MODE,
@@ -157,6 +21,8 @@ enum nhi_mailbox_cmd {
NHI_MAILBOX_SAVE_DEVS = 0x05,
NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
NHI_MAILBOX_DRV_UNLOADS = 0x07,
+ NHI_MAILBOX_DISCONNECT_PA = 0x10,
+ NHI_MAILBOX_DISCONNECT_PB = 0x11,
NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 09ed574e92ff..b3e49d19c01e 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt driver - NHI registers
*
@@ -17,13 +18,6 @@ enum ring_flags {
RING_FLAG_ENABLE = 1 << 31,
};
-enum ring_desc_flags {
- RING_DESC_ISOCH = 0x1, /* TX only? */
- RING_DESC_COMPLETED = 0x2, /* set by NHI */
- RING_DESC_POSTED = 0x4, /* always set this */
- RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
-};
-
/**
* struct ring_desc - TX/RX ring entry
*
@@ -77,6 +71,8 @@ struct ring_desc {
* ..: unknown
*/
#define REG_RX_OPTIONS_BASE 0x29800
+#define REG_RX_OPTIONS_E2E_HOP_MASK GENMASK(22, 12)
+#define REG_RX_OPTIONS_E2E_HOP_SHIFT 12
/*
* three bitfields: tx, rx, rx overflow
@@ -95,6 +91,8 @@ struct ring_desc {
#define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+#define REG_INT_THROTTLING_RATE 0x38c00
+
/* Interrupt Vector Allocation */
#define REG_INT_VEC_ALLOC_BASE 0x38c40
#define REG_INT_VEC_ALLOC_BITS 4
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 9562cd026dc0..ff49ad880bfd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - path/tunnel functionality
*
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
new file mode 100644
index 000000000000..8fe913a95b4a
--- /dev/null
+++ b/drivers/thunderbolt/property.c
@@ -0,0 +1,670 @@
+/*
+ * Thunderbolt XDomain property support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+#include <linux/thunderbolt.h>
+
+struct tb_property_entry {
+ u32 key_hi;
+ u32 key_lo;
+ u16 length;
+ u8 reserved;
+ u8 type;
+ u32 value;
+};
+
+struct tb_property_rootdir_entry {
+ u32 magic;
+ u32 length;
+ struct tb_property_entry entries[];
+};
+
+struct tb_property_dir_entry {
+ u32 uuid[4];
+ struct tb_property_entry entries[];
+};
+
+#define TB_PROPERTY_ROOTDIR_MAGIC 0x55584401
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len,
+ bool is_root);
+
+static inline void parse_dwdata(void *dst, const void *src, size_t dwords)
+{
+ be32_to_cpu_array(dst, src, dwords);
+}
+
+static inline void format_dwdata(void *dst, const void *src, size_t dwords)
+{
+ cpu_to_be32_array(dst, src, dwords);
+}
+
+static bool tb_property_entry_valid(const struct tb_property_entry *entry,
+ size_t block_len)
+{
+ switch (entry->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (entry->length > block_len)
+ return false;
+ if (entry->value + entry->length > block_len)
+ return false;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ if (entry->length != 1)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool tb_property_key_valid(const char *key)
+{
+ return key && strlen(key) <= TB_PROPERTY_KEY_SIZE;
+}
+
+static struct tb_property *
+tb_property_alloc(const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ property = kzalloc(sizeof(*property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ strcpy(property->key, key);
+ property->type = type;
+ INIT_LIST_HEAD(&property->list);
+
+ return property;
+}
+
+static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
+ const struct tb_property_entry *entry)
+{
+ char key[TB_PROPERTY_KEY_SIZE + 1];
+ struct tb_property *property;
+ struct tb_property_dir *dir;
+
+ if (!tb_property_entry_valid(entry, block_len))
+ return NULL;
+
+ parse_dwdata(key, entry, 2);
+ key[TB_PROPERTY_KEY_SIZE] = '\0';
+
+ property = tb_property_alloc(key, entry->type);
+ if (!property)
+ return NULL;
+
+ property->length = entry->length;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ dir = __tb_property_parse_dir(block, block_len, entry->value,
+ entry->length, false);
+ if (!dir) {
+ kfree(property);
+ return NULL;
+ }
+ property->value.dir = dir;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ property->value.data = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.data) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.data, block + entry->value,
+ entry->length);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ property->value.text = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.text) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.text, block + entry->value,
+ entry->length);
+ /* Force null termination */
+ property->value.text[property->length * 4 - 1] = '\0';
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ property->value.immediate = entry->value;
+ break;
+
+ default:
+ property->type = TB_PROPERTY_TYPE_UNKNOWN;
+ break;
+ }
+
+ return property;
+}
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root)
+{
+ const struct tb_property_entry *entries;
+ size_t i, content_len, nentries;
+ unsigned int content_offset;
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ if (is_root) {
+ content_offset = dir_offset + 2;
+ content_len = dir_len;
+ } else {
+ dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
+ GFP_KERNEL);
+ content_offset = dir_offset + 4;
+ content_len = dir_len - 4; /* Length includes UUID */
+ }
+
+ entries = (const struct tb_property_entry *)&block[content_offset];
+ nentries = content_len / (sizeof(*entries) / 4);
+
+ INIT_LIST_HEAD(&dir->properties);
+
+ for (i = 0; i < nentries; i++) {
+ struct tb_property *property;
+
+ property = tb_property_parse(block, block_len, &entries[i]);
+ if (!property) {
+ tb_property_free_dir(dir);
+ return NULL;
+ }
+
+ list_add_tail(&property->list, &dir->properties);
+ }
+
+ return dir;
+}
+
+/**
+ * tb_property_parse_dir() - Parses properties from given property block
+ * @block: Property block to parse
+ * @block_len: Number of dword elements in the property block
+ *
+ * This function parses the XDomain properties data block into a format that
+ * can be traversed using the helper functions provided by this module.
+ * Upon success returns the parsed directory. In case of error returns
+ * %NULL. The resulting &struct tb_property_dir needs to be released by
+ * calling tb_property_free_dir() when not needed anymore.
+ *
+ * The @block is expected to be root directory.
+ */
+struct tb_property_dir *tb_property_parse_dir(const u32 *block,
+ size_t block_len)
+{
+ const struct tb_property_rootdir_entry *rootdir =
+ (const struct tb_property_rootdir_entry *)block;
+
+ if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC)
+ return NULL;
+ if (rootdir->length > block_len)
+ return NULL;
+
+ return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
+ true);
+}
+
+/**
+ * tb_property_create_dir() - Creates new property directory
+ * @uuid: UUID used to identify the particular directory
+ *
+ * Creates new, empty property directory. If @uuid is %NULL then the
+ * directory is assumed to be root directory.
+ */
+struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
+{
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ INIT_LIST_HEAD(&dir->properties);
+ if (uuid) {
+ dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL);
+ if (!dir->uuid) {
+ kfree(dir);
+ return NULL;
+ }
+ }
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(tb_property_create_dir);
+
+static void tb_property_free(struct tb_property *property)
+{
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ tb_property_free_dir(property->value.dir);
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ kfree(property->value.data);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ kfree(property->value.text);
+ break;
+
+ default:
+ break;
+ }
+
+ kfree(property);
+}
+
+/**
+ * tb_property_free_dir() - Release memory allocated for property directory
+ * @dir: Directory to release
+ *
+ * This will release all the memory the directory occupies including all
+ * descendants. It is OK to pass %NULL @dir, then the function does
+ * nothing.
+ */
+void tb_property_free_dir(struct tb_property_dir *dir)
+{
+ struct tb_property *property, *tmp;
+
+ if (!dir)
+ return;
+
+ list_for_each_entry_safe(property, tmp, &dir->properties, list) {
+ list_del(&property->list);
+ tb_property_free(property);
+ }
+ kfree(dir->uuid);
+ kfree(dir);
+}
+EXPORT_SYMBOL_GPL(tb_property_free_dir);
+
+static size_t tb_property_dir_length(const struct tb_property_dir *dir,
+ bool recurse, size_t *data_len)
+{
+ const struct tb_property *property;
+ size_t len = 0;
+
+ if (dir->uuid)
+ len += sizeof(*dir->uuid) / 4;
+ else
+ len += sizeof(struct tb_property_rootdir_entry) / 4;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ len += sizeof(struct tb_property_entry) / 4;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ if (recurse) {
+ len += tb_property_dir_length(
+ property->value.dir, recurse, data_len);
+ }
+ /* Reserve dword padding after each directory */
+ if (data_len)
+ *data_len += 1;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (data_len)
+ *data_len += property->length;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
+ u32 *block, unsigned int start_offset, size_t block_len)
+{
+ unsigned int data_offset, dir_end;
+ const struct tb_property *property;
+ struct tb_property_entry *entry;
+ size_t dir_len, data_len = 0;
+ int ret;
+
+ /*
+ * The structure of property block looks like following. Leaf
+ * data/text is included right after the directory and each
+ * directory follows each other (even nested ones).
+ *
+ * +----------+ <-- start_offset
+ * | header | <-- root directory header
+ * +----------+ ---
+ * | entry 0 | -^--------------------.
+ * +----------+ | |
+ * | entry 1 | -|--------------------|--.
+ * +----------+ | | |
+ * | entry 2 | -|-----------------. | |
+ * +----------+ | | | |
+ * : : | dir_len | | |
+ * . . | | | |
+ * : : | | | |
+ * +----------+ | | | |
+ * | entry n | v | | |
+ * +----------+ <-- data_offset | | |
+ * | data 0 | <------------------|--' |
+ * +----------+ | |
+ * | data 1 | <------------------|-----'
+ * +----------+ |
+ * | 00000000 | padding |
+ * +----------+ <-- dir_end <------'
+ * | UUID | <-- directory UUID (child directory)
+ * +----------+
+ * | entry 0 |
+ * +----------+
+ * | entry 1 |
+ * +----------+
+ * : :
+ * . .
+ * : :
+ * +----------+
+ * | entry n |
+ * +----------+
+ * | data 0 |
+ * +----------+
+ *
+ * We use dir_end to hold pointer to the end of the directory. It
+ * will increase as we add directories and each directory should be
+ * added starting from previous dir_end.
+ */
+ dir_len = tb_property_dir_length(dir, false, &data_len);
+ data_offset = start_offset + dir_len;
+ dir_end = start_offset + data_len + dir_len;
+
+ if (data_offset > dir_end)
+ return -EINVAL;
+ if (dir_end > block_len)
+ return -EINVAL;
+
+ /* Write headers first */
+ if (dir->uuid) {
+ struct tb_property_dir_entry *pe;
+
+ pe = (struct tb_property_dir_entry *)&block[start_offset];
+ memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid));
+ entry = pe->entries;
+ } else {
+ struct tb_property_rootdir_entry *re;
+
+ re = (struct tb_property_rootdir_entry *)&block[start_offset];
+ re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
+ re->length = dir_len - sizeof(*re) / 4;
+ entry = re->entries;
+ }
+
+ list_for_each_entry(property, &dir->properties, list) {
+ const struct tb_property_dir *child;
+
+ format_dwdata(entry, property->key, 2);
+ entry->type = property->type;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ child = property->value.dir;
+ ret = __tb_property_format_dir(child, block, dir_end,
+ block_len);
+ if (ret < 0)
+ return ret;
+ entry->length = tb_property_dir_length(child, false,
+ NULL);
+ entry->value = dir_end;
+ dir_end = ret;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ format_dwdata(&block[data_offset], property->value.data,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ format_dwdata(&block[data_offset], property->value.text,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ entry->length = property->length;
+ entry->value = property->value.immediate;
+ break;
+
+ default:
+ break;
+ }
+
+ entry++;
+ }
+
+ return dir_end;
+}
+
+/**
+ * tb_property_format_dir() - Formats directory to the packed XDomain format
+ * @dir: Directory to format
+ * @block: Property block where the packed data is placed
+ * @block_len: Length of the property block
+ *
+ * This function formats the directory to the packed format that can then
+ * be sent over the Thunderbolt fabric to the receiving host. Returns %0
+ * in case of success and negative errno on failure. Passing %NULL in
+ * @block returns the number of dwords the formatted block requires.
+ */
+ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
+ size_t block_len)
+{
+ ssize_t ret;
+
+ if (!block) {
+ size_t dir_len, data_len = 0;
+
+ dir_len = tb_property_dir_length(dir, true, &data_len);
+ return dir_len + data_len;
+ }
+
+ ret = __tb_property_format_dir(dir, block, 0, block_len);
+ return ret < 0 ? ret : 0;
+}
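+
+/*
+ * Example (illustrative only, not part of this interface): the typical
+ * two-pass use of tb_property_format_dir() is to first pass %NULL to
+ * learn how many dwords are needed and then allocate and format the
+ * block. The variable names below are hypothetical;
+ * rebuild_property_block() in xdomain.c follows the same pattern.
+ *
+ *	u32 *block;
+ *	ssize_t dwords, ret;
+ *
+ *	dwords = tb_property_format_dir(dir, NULL, 0);
+ *	if (dwords < 0)
+ *		return dwords;
+ *
+ *	block = kcalloc(dwords, sizeof(u32), GFP_KERNEL);
+ *	if (!block)
+ *		return -ENOMEM;
+ *
+ *	ret = tb_property_format_dir(dir, block, dwords);
+ *	if (ret) {
+ *		kfree(block);
+ *		return ret;
+ *	}
+ */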
+
+/**
+ * tb_property_add_immediate() - Add immediate property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @value: Immediate value to store with the property
+ */
+int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
+ u32 value)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = 1;
+ property->value.immediate = value;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_immediate);
+
+/**
+ * tb_property_add_data() - Adds arbitrary data property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @buf: Data buffer to add
+ * @buflen: Number of bytes in the data buffer
+ *
+ * Function takes a copy of @buf and adds it to the directory.
+ */
+int tb_property_add_data(struct tb_property_dir *parent, const char *key,
+ const void *buf, size_t buflen)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(buflen, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = size / 4;
+	property->value.data = kzalloc(size, GFP_KERNEL);
+	if (!property->value.data) {
+		kfree(property);
+		return -ENOMEM;
+	}
+	memcpy(property->value.data, buf, buflen);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_data);
+
+/**
+ * tb_property_add_text() - Adds string property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @text: String to add
+ *
+ * Function takes a copy of @text and adds it to the directory.
+ */
+int tb_property_add_text(struct tb_property_dir *parent, const char *key,
+ const char *text)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(strlen(text) + 1, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = size / 4;
+	property->value.text = kzalloc(size, GFP_KERNEL);
+	if (!property->value.text) {
+		kfree(property);
+		return -ENOMEM;
+	}
+	strcpy(property->value.text, text);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_text);
+
+/**
+ * tb_property_add_dir() - Adds a directory to the parent directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @dir: Directory to add
+ */
+int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
+ struct tb_property_dir *dir)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY);
+ if (!property)
+ return -ENOMEM;
+
+ property->value.dir = dir;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_dir);
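+
+/*
+ * Example (illustrative only): filling an existing directory with the
+ * helpers above. The directory pointers, key names and payloads are
+ * hypothetical and error handling is omitted for brevity; keys may be
+ * at most 8 characters long (see tb_register_property_dir() in
+ * xdomain.c).
+ *
+ *	tb_property_add_immediate(dir, "maxspeed", 40);
+ *	tb_property_add_text(dir, "name", "example");
+ *	tb_property_add_data(dir, "blob", data, data_len);
+ *	tb_property_add_dir(dir, "child", child_dir);
+ */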
+
+/**
+ * tb_property_remove() - Removes property from a parent directory
+ * @property: Property to remove
+ *
+ * Note that the memory for @property is released as well, so it is not
+ * allowed to touch the object after a call to this function.
+ */
+void tb_property_remove(struct tb_property *property)
+{
+ list_del(&property->list);
+ kfree(property);
+}
+EXPORT_SYMBOL_GPL(tb_property_remove);
+
+/**
+ * tb_property_find() - Find a property from a directory
+ * @dir: Directory where the property is searched
+ * @key: Key to look for
+ * @type: Type of the property
+ *
+ * Finds and returns a property from the given directory. Does not recurse
+ * into sub-directories. Returns %NULL if the property was not found.
+ */
+struct tb_property *tb_property_find(struct tb_property_dir *dir,
+ const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ if (property->type == type && !strcmp(property->key, key))
+ return property;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_property_find);
+
+/**
+ * tb_property_get_next() - Get next property from directory
+ * @dir: Directory holding properties
+ * @prev: Previous property in the directory (%NULL returns the first)
+ */
+struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
+ struct tb_property *prev)
+{
+ if (prev) {
+ if (list_is_last(&prev->list, &dir->properties))
+ return NULL;
+ return list_next_entry(prev, list);
+ }
+ return list_first_entry_or_null(&dir->properties, struct tb_property,
+ list);
+}
+EXPORT_SYMBOL_GPL(tb_property_get_next);
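+
+/*
+ * Example (illustrative only): iterating over the properties of a
+ * directory with tb_property_get_next(). The tb_property_for_each()
+ * helper used in xdomain.c offers the same traversal in loop form.
+ * Variable names are hypothetical.
+ *
+ *	struct tb_property *p = NULL;
+ *
+ *	while ((p = tb_property_get_next(dir, p)) != NULL) {
+ *		if (p->type == TB_PROPERTY_TYPE_VALUE)
+ *			pr_info("%s: %u\n", p->key, p->value.immediate);
+ *	}
+ */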
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 53f40c57df59..da54ace4dd2f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - switch/port utility functions
*
@@ -171,11 +172,11 @@ static int nvm_authenticate_host(struct tb_switch *sw)
/*
* Root switch NVM upgrade requires that we disconnect the
- * existing PCIe paths first (in case it is not in safe mode
+ * existing paths first (in case it is not in safe mode
* already).
*/
if (!sw->safe_mode) {
- ret = tb_domain_disconnect_pcie_paths(sw->tb);
+ ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
/*
@@ -1363,6 +1364,9 @@ void tb_switch_remove(struct tb_switch *sw)
if (sw->ports[i].remote)
tb_switch_remove(sw->ports[i].remote->sw);
sw->ports[i].remote = NULL;
+ if (sw->ports[i].xdomain)
+ tb_xdomain_remove(sw->ports[i].xdomain);
+ sw->ports[i].xdomain = NULL;
}
if (!sw->is_unplugged)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 0b22ad9d68b4..d674e06767a5 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e0deee4f1eb0..895c57a0a090 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
@@ -9,6 +10,7 @@
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
+#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include "tb_regs.h"
@@ -39,23 +41,7 @@ struct tb_switch_nvm {
bool authenticating;
};
-/**
- * enum tb_security_level - Thunderbolt security level
- * @TB_SECURITY_NONE: No security, legacy mode
- * @TB_SECURITY_USER: User approval required at minimum
- * @TB_SECURITY_SECURE: One time saved key required at minimum
- * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
- */
-enum tb_security_level {
- TB_SECURITY_NONE,
- TB_SECURITY_USER,
- TB_SECURITY_SECURE,
- TB_SECURITY_DPONLY,
-};
-
#define TB_SWITCH_KEY_SIZE 32
-/* Each physical port contains 2 links on modern controllers */
-#define TB_SWITCH_LINKS_PER_PHY_PORT 2
/**
* struct tb_switch - a thunderbolt switch
@@ -125,14 +111,25 @@ struct tb_switch {
/**
* struct tb_port - a thunderbolt port, part of a tb_switch
+ * @config: Cached port configuration read from registers
+ * @sw: Switch the port belongs to
+ * @remote: Remote port (%NULL if not connected)
+ * @xdomain: Remote host (%NULL if not connected)
+ * @cap_phy: Offset, zero if not found
+ * @port: Port number on switch
+ * @disabled: Disabled by eeprom
+ * @dual_link_port: If the switch is connected using two ports, points
+ * to the other port.
+ * @link_nr: Whether this is the primary or the secondary port of the dual link.
*/
struct tb_port {
struct tb_regs_port_header config;
struct tb_switch *sw;
- struct tb_port *remote; /* remote port, NULL if not connected */
- int cap_phy; /* offset, zero if not found */
- u8 port; /* port number on switch */
- bool disabled; /* disabled by eeprom */
+ struct tb_port *remote;
+ struct tb_xdomain *xdomain;
+ int cap_phy;
+ u8 port;
+ bool disabled;
struct tb_port *dual_link_port;
u8 link_nr:1;
};
@@ -205,6 +202,8 @@ struct tb_path {
* @add_switch_key: Add key to switch
* @challenge_switch_key: Challenge switch using key
* @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
+ * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
@@ -221,33 +220,8 @@ struct tb_cm_ops {
int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
const u8 *challenge, u8 *response);
int (*disconnect_pcie_paths)(struct tb *tb);
-};
-
-/**
- * struct tb - main thunderbolt bus structure
- * @dev: Domain device
- * @lock: Big lock. Must be held when accessing any struct
- * tb_switch / struct tb_port.
- * @nhi: Pointer to the NHI structure
- * @ctl: Control channel for this domain
- * @wq: Ordered workqueue for all domain specific work
- * @root_switch: Root switch of this domain
- * @cm_ops: Connection manager specific operations vector
- * @index: Linux assigned domain number
- * @security_level: Current security level
- * @privdata: Private connection manager specific data
- */
-struct tb {
- struct device dev;
- struct mutex lock;
- struct tb_nhi *nhi;
- struct tb_ctl *ctl;
- struct workqueue_struct *wq;
- struct tb_switch *root_switch;
- const struct tb_cm_ops *cm_ops;
- int index;
- enum tb_security_level security_level;
- unsigned long privdata[0];
+ int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+ int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
};
static inline void *tb_priv(struct tb *tb)
@@ -368,13 +342,14 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct bus_type tb_bus_type;
extern struct device_type tb_domain_type;
extern struct device_type tb_switch_type;
int tb_domain_init(void);
void tb_domain_exit(void);
void tb_switch_exit(void);
+int tb_xdomain_init(void);
+void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
int tb_domain_add(struct tb *tb);
@@ -387,6 +362,9 @@ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_all_paths(struct tb *tb);
static inline void tb_domain_put(struct tb *tb)
{
@@ -409,11 +387,6 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
-static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
-{
- return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
-}
-
static inline void tb_switch_put(struct tb_switch *sw)
{
put_device(&sw->dev);
@@ -471,4 +444,14 @@ static inline u64 tb_downstream_route(struct tb_port *port)
| ((u64) port->port << (port->sw->config.depth * 8));
}
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size);
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid);
+void tb_xdomain_add(struct tb_xdomain *xd);
+void tb_xdomain_remove(struct tb_xdomain *xd);
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth);
+
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index de6441e4a060..b0a092baa605 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -15,23 +15,6 @@
#include <linux/types.h>
#include <linux/uuid.h>
-enum tb_cfg_pkg_type {
- TB_CFG_PKG_READ = 1,
- TB_CFG_PKG_WRITE = 2,
- TB_CFG_PKG_ERROR = 3,
- TB_CFG_PKG_NOTIFY_ACK = 4,
- TB_CFG_PKG_EVENT = 5,
- TB_CFG_PKG_XDOMAIN_REQ = 6,
- TB_CFG_PKG_XDOMAIN_RESP = 7,
- TB_CFG_PKG_OVERRIDE = 8,
- TB_CFG_PKG_RESET = 9,
- TB_CFG_PKG_ICM_EVENT = 10,
- TB_CFG_PKG_ICM_CMD = 11,
- TB_CFG_PKG_ICM_RESP = 12,
- TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
-
-};
-
enum tb_cfg_space {
TB_CFG_HOPS = 0,
TB_CFG_PORT = 1,
@@ -118,11 +101,14 @@ enum icm_pkg_code {
ICM_CHALLENGE_DEVICE = 0x5,
ICM_ADD_DEVICE_KEY = 0x6,
ICM_GET_ROUTE = 0xa,
+ ICM_APPROVE_XDOMAIN = 0x10,
};
enum icm_event_code {
ICM_EVENT_DEVICE_CONNECTED = 3,
ICM_EVENT_DEVICE_DISCONNECTED = 4,
+ ICM_EVENT_XDOMAIN_CONNECTED = 6,
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
};
struct icm_pkg_header {
@@ -130,7 +116,7 @@ struct icm_pkg_header {
u8 flags;
u8 packet_id;
u8 total_packets;
-} __packed;
+};
#define ICM_FLAGS_ERROR BIT(0)
#define ICM_FLAGS_NO_KEY BIT(1)
@@ -139,20 +125,20 @@ struct icm_pkg_header {
struct icm_pkg_driver_ready {
struct icm_pkg_header hdr;
-} __packed;
+};
struct icm_pkg_driver_ready_response {
struct icm_pkg_header hdr;
u8 romver;
u8 ramver;
u16 security_level;
-} __packed;
+};
/* Falcon Ridge & Alpine Ridge common messages */
struct icm_fr_pkg_get_topology {
struct icm_pkg_header hdr;
-} __packed;
+};
#define ICM_GET_TOPOLOGY_PACKETS 14
@@ -167,7 +153,7 @@ struct icm_fr_pkg_get_topology_response {
u32 reserved[2];
u32 ports[16];
u32 port_hop_info[16];
-} __packed;
+};
#define ICM_SWITCH_USED BIT(0)
#define ICM_SWITCH_UPSTREAM_PORT_MASK GENMASK(7, 1)
@@ -184,7 +170,7 @@ struct icm_fr_event_device_connected {
u8 connection_id;
u16 link_info;
u32 ep_name[55];
-} __packed;
+};
#define ICM_LINK_INFO_LINK_MASK 0x7
#define ICM_LINK_INFO_DEPTH_SHIFT 4
@@ -197,13 +183,32 @@ struct icm_fr_pkg_approve_device {
u8 connection_key;
u8 connection_id;
u16 reserved;
-} __packed;
+};
struct icm_fr_event_device_disconnected {
struct icm_pkg_header hdr;
u16 reserved;
u16 link_info;
-} __packed;
+};
+
+struct icm_fr_event_xdomain_connected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ uuid_t local_uuid;
+ u32 local_route_hi;
+ u32 local_route_lo;
+ u32 remote_route_hi;
+ u32 remote_route_lo;
+};
+
+struct icm_fr_event_xdomain_disconnected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+};
struct icm_fr_pkg_add_device_key {
struct icm_pkg_header hdr;
@@ -212,7 +217,7 @@ struct icm_fr_pkg_add_device_key {
u8 connection_id;
u16 reserved;
u32 key[8];
-} __packed;
+};
struct icm_fr_pkg_add_device_key_response {
struct icm_pkg_header hdr;
@@ -220,7 +225,7 @@ struct icm_fr_pkg_add_device_key_response {
u8 connection_key;
u8 connection_id;
u16 reserved;
-} __packed;
+};
struct icm_fr_pkg_challenge_device {
struct icm_pkg_header hdr;
@@ -229,7 +234,7 @@ struct icm_fr_pkg_challenge_device {
u8 connection_id;
u16 reserved;
u32 challenge[8];
-} __packed;
+};
struct icm_fr_pkg_challenge_device_response {
struct icm_pkg_header hdr;
@@ -239,7 +244,29 @@ struct icm_fr_pkg_challenge_device_response {
u16 reserved;
u32 challenge[8];
u32 response[8];
-} __packed;
+};
+
+struct icm_fr_pkg_approve_xdomain {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+struct icm_fr_pkg_approve_xdomain_response {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
/* Alpine Ridge only messages */
@@ -247,7 +274,7 @@ struct icm_ar_pkg_get_route {
struct icm_pkg_header hdr;
u16 reserved;
u16 link_info;
-} __packed;
+};
struct icm_ar_pkg_get_route_response {
struct icm_pkg_header hdr;
@@ -255,6 +282,85 @@ struct icm_ar_pkg_get_route_response {
u16 link_info;
u32 route_hi;
u32 route_lo;
-} __packed;
+};
+
+/* XDomain messages */
+
+struct tb_xdomain_header {
+ u32 route_hi;
+ u32 route_lo;
+ u32 length_sn;
+};
+
+#define TB_XDOMAIN_LENGTH_MASK GENMASK(5, 0)
+#define TB_XDOMAIN_SN_MASK GENMASK(28, 27)
+#define TB_XDOMAIN_SN_SHIFT 27
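+
+/*
+ * length_sn packs the payload length in dwords (the packet without this
+ * header) into bits 5:0 and a 2-bit sequence number into bits 28:27;
+ * see tb_xdp_fill_header() in xdomain.c for how the field is composed.
+ */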
+
+enum tb_xdp_type {
+ UUID_REQUEST_OLD = 1,
+ UUID_RESPONSE = 2,
+ PROPERTIES_REQUEST,
+ PROPERTIES_RESPONSE,
+ PROPERTIES_CHANGED_REQUEST,
+ PROPERTIES_CHANGED_RESPONSE,
+ ERROR_RESPONSE,
+ UUID_REQUEST = 12,
+};
+
+struct tb_xdp_header {
+ struct tb_xdomain_header xd_hdr;
+ uuid_t uuid;
+ u32 type;
+};
+
+struct tb_xdp_properties {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 reserved;
+};
+
+struct tb_xdp_properties_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 data_length;
+ u32 generation;
+ u32 data[0];
+};
+
+/*
+ * Maximum length of the data array a single XDomain property response is
+ * allowed to carry.
+ */
+#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH \
+ (((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4)
+
+/* Maximum size of the total property block in dwords we allow */
+#define TB_XDP_PROPERTIES_MAX_LENGTH 500
+
+struct tb_xdp_properties_changed {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+};
+
+struct tb_xdp_properties_changed_response {
+ struct tb_xdp_header hdr;
+};
+
+enum tb_xdp_error {
+ ERROR_SUCCESS,
+ ERROR_UNKNOWN_PACKET,
+ ERROR_UNKNOWN_DOMAIN,
+ ERROR_NOT_SUPPORTED,
+ ERROR_NOT_READY,
+};
+
+struct tb_xdp_error_response {
+ struct tb_xdp_header hdr;
+ u32 error;
+};
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 582bd1f156dc..5d94142afda6 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - Port/Switch config area registers
*
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
index ca4475907d7a..0637537ea53f 100644
--- a/drivers/thunderbolt/tunnel_pci.c
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
index a67f93c140fa..f9b65fa1fd4d 100644
--- a/drivers/thunderbolt/tunnel_pci.h
+++ b/drivers/thunderbolt/tunnel_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
new file mode 100644
index 000000000000..f25d88d4552b
--- /dev/null
+++ b/drivers/thunderbolt/xdomain.c
@@ -0,0 +1,1570 @@
+/*
+ * Thunderbolt XDomain discovery protocol support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include "tb.h"
+
+#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
+#define XDOMAIN_PROPERTIES_RETRIES 60
+#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
+
+struct xdomain_request_work {
+ struct work_struct work;
+ struct tb_xdp_header *pkg;
+ struct tb *tb;
+};
+
+/* Serializes access to the properties and protocol handlers below */
+static DEFINE_MUTEX(xdomain_lock);
+
+/* Properties exposed to the remote domains */
+static struct tb_property_dir *xdomain_property_dir;
+static u32 *xdomain_property_block;
+static u32 xdomain_property_block_len;
+static u32 xdomain_property_block_gen;
+
+/* Additional protocol handlers */
+static LIST_HEAD(protocol_handlers);
+
+/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
+static const uuid_t tb_xdp_uuid =
+ UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
+ 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+
+static bool tb_xdomain_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ switch (pkg->frame.eof) {
+ case TB_CFG_PKG_ERROR:
+ return true;
+
+ case TB_CFG_PKG_XDOMAIN_RESP: {
+ const struct tb_xdp_header *res_hdr = pkg->buffer;
+ const struct tb_xdp_header *req_hdr = req->request;
+
+ if (pkg->frame.size < req->response_size / 4)
+ return false;
+
+ /* Make sure route matches */
+ if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
+ req_hdr->xd_hdr.route_hi)
+ return false;
+ if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
+ return false;
+
+ /* Check that the XDomain protocol matches */
+ if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
+ return false;
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static bool tb_xdomain_copy(struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ memcpy(req->response, pkg->buffer, req->response_size);
+ req->result.err = 0;
+ return true;
+}
+
+static void response_ready(void *data)
+{
+ tb_cfg_request_put(data);
+}
+
+static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = response;
+ req->request_size = size;
+ req->request_type = type;
+
+ return tb_cfg_request(ctl, req, response_ready, req);
+}
+
+/**
+ * tb_xdomain_response() - Send an XDomain response message
+ * @xd: XDomain to send the message
+ * @response: Response to send
+ * @size: Size of the response
+ * @type: PDF type of the response
+ *
+ * This can be used to send an XDomain response message to the other
+ * domain. No response for the message is expected.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ return __tb_xdomain_response(xd->tb->ctl, response, size, type);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_response);
+
+static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
+ size_t response_size, enum tb_cfg_pkg_type response_type,
+ unsigned int timeout_msec)
+{
+ struct tb_cfg_request *req;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = request;
+ req->request_size = request_size;
+ req->request_type = request_type;
+ req->response = response;
+ req->response_size = response_size;
+ req->response_type = response_type;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res.err == 1 ? -EIO : res.err;
+}
+
+/**
+ * tb_xdomain_request() - Send an XDomain request
+ * @xd: XDomain to send the request
+ * @request: Request to send
+ * @request_size: Size of the request in bytes
+ * @request_type: PDF type of the request
+ * @response: Response is copied here
+ * @response_size: Expected size of the response in bytes
+ * @response_type: Expected PDF type of the response
+ * @timeout_msec: Timeout in milliseconds to wait for the response
+ *
+ * This function can be used to send XDomain control channel messages to
+ * the other domain. The function waits until the response is received
+ * or the timeout triggers, whichever comes first.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type,
+ void *response, size_t response_size,
+ enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
+{
+ return __tb_xdomain_request(xd->tb->ctl, request, request_size,
+ request_type, response, response_size,
+ response_type, timeout_msec);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_request);
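+
+/*
+ * Example (illustrative only): a service driver performing a request/
+ * response exchange with the remote domain. struct my_req and
+ * struct my_res are hypothetical protocol specific messages that begin
+ * with a struct tb_xdp_header filled for the service protocol in
+ * question.
+ *
+ *	struct my_req req = { };
+ *	struct my_res res = { };
+ *	int ret;
+ *
+ *	ret = tb_xdomain_request(xd, &req, sizeof(req),
+ *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ *				 TB_CFG_PKG_XDOMAIN_RESP, 5000);
+ *	if (ret)
+ *		return ret;
+ */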
+
+static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
+ u8 sequence, enum tb_xdp_type type, size_t size)
+{
+ u32 length_sn;
+
+ length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
+ length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
+
+ hdr->xd_hdr.route_hi = upper_32_bits(route);
+ hdr->xd_hdr.route_lo = lower_32_bits(route);
+ hdr->xd_hdr.length_sn = length_sn;
+ hdr->type = type;
+ memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
+}
+
+static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+{
+ const struct tb_xdp_error_response *error;
+
+ if (hdr->type != ERROR_RESPONSE)
+ return 0;
+
+ error = (const struct tb_xdp_error_response *)hdr;
+
+ switch (error->error) {
+ case ERROR_UNKNOWN_PACKET:
+ case ERROR_UNKNOWN_DOMAIN:
+ return -EIO;
+ case ERROR_NOT_SUPPORTED:
+ return -ENOTSUPP;
+ case ERROR_NOT_READY:
+ return -EAGAIN;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ enum tb_xdp_error error)
+{
+ struct tb_xdp_error_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
+ sizeof(res));
+ res.error = error;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
+ const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
+ u32 **block, u32 *generation)
+{
+ struct tb_xdp_properties_response *res;
+ struct tb_xdp_properties req;
+ u16 data_len, len;
+ size_t total_size;
+ u32 *data = NULL;
+ int ret;
+
+ total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
+ sizeof(req));
+ memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
+ memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
+
+ len = 0;
+ data_len = 0;
+
+ do {
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, res,
+ total_size, TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ goto err;
+
+ ret = tb_xdp_handle_error(&res->hdr);
+ if (ret)
+ goto err;
+
+ /*
+		 * Packet length includes the whole payload without the
+		 * XDomain header. Validate first that the packet is at
+		 * least the size of the response structure.
+ */
+ len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (len < sizeof(*res) / 4) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len += sizeof(res->hdr.xd_hdr) / 4;
+ len -= sizeof(*res) / 4;
+
+ if (res->offset != req.offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * First time allocate block that has enough space for
+ * the whole properties block.
+ */
+ if (!data) {
+ data_len = res->data_length;
+ if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ memcpy(data + req.offset, res->data, len * 4);
+ req.offset += len;
+ } while (!data_len || req.offset < data_len);
+
+ *block = data;
+ *generation = res->generation;
+
+ kfree(res);
+
+ return data_len;
+
+err:
+ kfree(data);
+ kfree(res);
+
+ return ret;
+}
+
+static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
+ u64 route, u8 sequence, const uuid_t *src_uuid,
+ const struct tb_xdp_properties *req)
+{
+ struct tb_xdp_properties_response *res;
+ size_t total_size;
+ u16 len;
+ int ret;
+
+ /*
+	 * Currently we expect all requests to be directed to us. The
+	 * protocol supports forwarding, though, and we may add support
+	 * for it later on.
+ */
+ if (!uuid_equal(src_uuid, &req->dst_uuid)) {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_UNKNOWN_DOMAIN);
+ return 0;
+ }
+
+ mutex_lock(&xdomain_lock);
+
+ if (req->offset >= xdomain_property_block_len) {
+ mutex_unlock(&xdomain_lock);
+ return -EINVAL;
+ }
+
+ len = xdomain_property_block_len - req->offset;
+ len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
+ total_size = sizeof(*res) + len * 4;
+
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res) {
+ mutex_unlock(&xdomain_lock);
+ return -ENOMEM;
+ }
+
+ tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+ total_size);
+ res->generation = xdomain_property_block_gen;
+ res->data_length = xdomain_property_block_len;
+ res->offset = req->offset;
+ uuid_copy(&res->src_uuid, src_uuid);
+ uuid_copy(&res->dst_uuid, &req->src_uuid);
+ memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+
+ mutex_unlock(&xdomain_lock);
+
+ ret = __tb_xdomain_response(ctl, res, total_size,
+ TB_CFG_PKG_XDOMAIN_RESP);
+
+ kfree(res);
+ return ret;
+}
+
+static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
+ int retry, const uuid_t *uuid)
+{
+ struct tb_xdp_properties_changed_response res;
+ struct tb_xdp_properties_changed req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4,
+ PROPERTIES_CHANGED_REQUEST, sizeof(req));
+ uuid_copy(&req.src_uuid, uuid);
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return tb_xdp_handle_error(&res.hdr);
+}
+
+static int
+tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
+{
+ struct tb_xdp_properties_changed_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence,
+ PROPERTIES_CHANGED_RESPONSE, sizeof(res));
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+/**
+ * tb_register_protocol_handler() - Register protocol handler
+ * @handler: Handler to register
+ *
+ * This allows XDomain service drivers to hook into incoming XDomain
+ * messages. After this function is called the service driver needs to
+ * be able to handle calls to the callback whenever a packet with the
+ * registered protocol UUID is received.
+ */
+int tb_register_protocol_handler(struct tb_protocol_handler *handler)
+{
+ if (!handler->uuid || !handler->callback)
+ return -EINVAL;
+ if (uuid_equal(handler->uuid, &tb_xdp_uuid))
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ list_add_tail(&handler->list, &protocol_handlers);
+ mutex_unlock(&xdomain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
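+
+/*
+ * Example (illustrative only): hooking a service specific protocol into
+ * the incoming XDomain message flow. The UUID, the callback body and
+ * the names are hypothetical. Returning a positive value from the
+ * callback tells tb_xdomain_handle_request() that the packet was
+ * consumed; a response can be sent back with tb_xdomain_response().
+ *
+ *	static int my_callback(const void *buf, size_t size, void *data)
+ *	{
+ *		return 1;
+ *	}
+ *
+ *	static struct tb_protocol_handler my_handler = {
+ *		.uuid = &my_protocol_uuid,
+ *		.callback = my_callback,
+ *		.data = NULL,
+ *	};
+ *
+ *	tb_register_protocol_handler(&my_handler);
+ */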
+
+/**
+ * tb_unregister_protocol_handler() - Unregister protocol handler
+ * @handler: Handler to unregister
+ *
+ * Removes the previously registered protocol handler.
+ */
+void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
+{
+ mutex_lock(&xdomain_lock);
+ list_del_init(&handler->list);
+ mutex_unlock(&xdomain_lock);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
+
+static void tb_xdp_handle_request(struct work_struct *work)
+{
+ struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+ const struct tb_xdp_header *pkg = xw->pkg;
+ const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
+ struct tb *tb = xw->tb;
+ struct tb_ctl *ctl = tb->ctl;
+ const uuid_t *uuid;
+ int ret = 0;
+ u32 sequence;
+ u64 route;
+
+ route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
+ sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
+ sequence >>= TB_XDOMAIN_SN_SHIFT;
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch)
+ uuid = tb->root_switch->uuid;
+ else
+ uuid = NULL;
+ mutex_unlock(&tb->lock);
+
+ if (!uuid) {
+ tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
+ goto out;
+ }
+
+ switch (pkg->type) {
+ case PROPERTIES_REQUEST:
+ ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
+ (const struct tb_xdp_properties *)pkg);
+ break;
+
+ case PROPERTIES_CHANGED_REQUEST: {
+ const struct tb_xdp_properties_changed *xchg =
+ (const struct tb_xdp_properties_changed *)pkg;
+ struct tb_xdomain *xd;
+
+ ret = tb_xdp_properties_changed_response(ctl, route, sequence);
+
+ /*
+ * Since the properties have been changed, let's update
+ * the xdomain related to this connection as well in
+ * case there is a change in services it offers.
+ */
+ xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+ if (xd) {
+ queue_delayed_work(tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(50));
+ tb_xdomain_put(xd);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (ret) {
+ tb_warn(tb, "failed to send XDomain response for %#x\n",
+ pkg->type);
+ }
+
+out:
+ kfree(xw->pkg);
+ kfree(xw);
+}
+
+static void
+tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
+ size_t size)
+{
+ struct xdomain_request_work *xw;
+
+ xw = kmalloc(sizeof(*xw), GFP_KERNEL);
+ if (!xw)
+ return;
+
+ INIT_WORK(&xw->work, tb_xdp_handle_request);
+	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
+	if (!xw->pkg) {
+		kfree(xw);
+		return;
+	}
+	xw->tb = tb;
+
+ queue_work(tb->wq, &xw->work);
+}
+
+/**
+ * tb_register_service_driver() - Register XDomain service driver
+ * @drv: Driver to register
+ *
+ * Registers a new service driver @drv to the bus.
+ */
+int tb_register_service_driver(struct tb_service_driver *drv)
+{
+ drv->driver.bus = &tb_bus_type;
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_register_service_driver);
+
+/**
+ * tb_unregister_service_driver() - Unregister XDomain service driver
+ * @drv: Driver to unregister
+ *
+ * Unregisters XDomain service driver from the bus.
+ */
+void tb_unregister_service_driver(struct tb_service_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
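+
+/*
+ * Example (illustrative only): minimal registration of an XDomain
+ * service driver. The driver name is hypothetical and only the generic
+ * struct device_driver fields are shown; matching against services
+ * happens through the "tbsvc:kSpXvXrX" modalias generated below.
+ *
+ *	static struct tb_service_driver my_service_driver = {
+ *		.driver = {
+ *			.name = "my_service",
+ *			.owner = THIS_MODULE,
+ *		},
+ *	};
+ *
+ * On module load:
+ *
+ *	tb_register_service_driver(&my_service_driver);
+ *
+ * and on unload:
+ *
+ *	tb_unregister_service_driver(&my_service_driver);
+ */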
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /*
+ * It should be null terminated but anything else is pretty much
+ * allowed.
+ */
+ return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+}
+static DEVICE_ATTR_RO(key);
+
+static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+{
+ return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
+ svc->prtcid, svc->prtcvers, svc->prtcrevs);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /* Full buffer size except new line and null termination */
+ get_modalias(svc, buf, PAGE_SIZE - 2);
+	/* sprintf() with overlapping source and destination is undefined */
+	return strlen(strcat(buf, "\n"));
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcid);
+}
+static DEVICE_ATTR_RO(prtcid);
+
+static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcvers);
+}
+static DEVICE_ATTR_RO(prtcvers);
+
+static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcrevs);
+}
+static DEVICE_ATTR_RO(prtcrevs);
+
+static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "0x%08x\n", svc->prtcstns);
+}
+static DEVICE_ATTR_RO(prtcstns);
+
+static struct attribute *tb_service_attrs[] = {
+ &dev_attr_key.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_prtcid.attr,
+ &dev_attr_prtcvers.attr,
+ &dev_attr_prtcrevs.attr,
+ &dev_attr_prtcstns.attr,
+ NULL,
+};
+
+static struct attribute_group tb_service_attr_group = {
+ .attrs = tb_service_attrs,
+};
+
+static const struct attribute_group *tb_service_attr_groups[] = {
+ &tb_service_attr_group,
+ NULL,
+};
+
+static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ char modalias[64];
+
+ get_modalias(svc, modalias, sizeof(modalias));
+ return add_uevent_var(env, "MODALIAS=%s", modalias);
+}
+
+static void tb_service_release(struct device *dev)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ struct tb_xdomain *xd = tb_service_parent(svc);
+
+ ida_simple_remove(&xd->service_ids, svc->id);
+ kfree(svc->key);
+ kfree(svc);
+}
+
+struct device_type tb_service_type = {
+ .name = "thunderbolt_service",
+ .groups = tb_service_attr_groups,
+ .uevent = tb_service_uevent,
+ .release = tb_service_release,
+};
+EXPORT_SYMBOL_GPL(tb_service_type);
+
+static int remove_missing_service(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ if (!tb_property_find(xd->properties, svc->key,
+ TB_PROPERTY_TYPE_DIRECTORY))
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int find_service(struct device *dev, void *data)
+{
+ const struct tb_property *p = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ return !strcmp(svc->key, p->key);
+}
+
+static int populate_service(struct tb_service *svc,
+ struct tb_property *property)
+{
+ struct tb_property_dir *dir = property->value.dir;
+ struct tb_property *p;
+
+ /* Fill in standard properties */
+ p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcid = p->value.immediate;
+ p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcvers = p->value.immediate;
+ p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcrevs = p->value.immediate;
+ p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcstns = p->value.immediate;
+
+ svc->key = kstrdup(property->key, GFP_KERNEL);
+ if (!svc->key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void enumerate_services(struct tb_xdomain *xd)
+{
+ struct tb_service *svc;
+ struct tb_property *p;
+ struct device *dev;
+
+ /*
+ * First remove all services that are not available anymore in
+ * the updated property block.
+ */
+ device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
+
+ /* Then re-enumerate properties creating new services as we go */
+ tb_property_for_each(xd->properties, p) {
+ if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
+ continue;
+
+ /* If the service exists already we are fine */
+ dev = device_find_child(&xd->dev, p, find_service);
+ if (dev) {
+ put_device(dev);
+ continue;
+ }
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ break;
+
+ if (populate_service(svc, p)) {
+ kfree(svc);
+ break;
+ }
+
+		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		if (svc->id < 0) {
+			kfree(svc->key);
+			kfree(svc);
+			break;
+		}
+		svc->dev.bus = &tb_bus_type;
+ svc->dev.type = &tb_service_type;
+ svc->dev.parent = &xd->dev;
+ dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+
+ if (device_register(&svc->dev)) {
+ put_device(&svc->dev);
+ break;
+ }
+ }
+}
+
+static int populate_properties(struct tb_xdomain *xd,
+ struct tb_property_dir *dir)
+{
+ const struct tb_property *p;
+
+ /* Required properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->device = p->value.immediate;
+
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->vendor = p->value.immediate;
+
+ kfree(xd->device_name);
+ xd->device_name = NULL;
+ kfree(xd->vendor_name);
+ xd->vendor_name = NULL;
+
+ /* Optional properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
+
+ return 0;
+}
+
+/* Called with @xd->lock held */
+static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+{
+ if (!xd->resume)
+ return;
+
+ xd->resume = false;
+ if (xd->transmit_path) {
+ dev_dbg(&xd->dev, "re-establishing DMA path\n");
+ tb_domain_approve_xdomain_paths(xd->tb, xd);
+ }
+}
+
+static void tb_xdomain_get_properties(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_properties_work.work);
+ struct tb_property_dir *dir;
+ struct tb *tb = xd->tb;
+ bool update = false;
+ u32 *block = NULL;
+ u32 gen = 0;
+ int ret;
+
+ ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
+ xd->remote_uuid, xd->properties_retries,
+ &block, &gen);
+ if (ret < 0) {
+ if (xd->properties_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ } else {
+ /* Give up now */
+ dev_err(&xd->dev,
+				"failed to read XDomain properties from %pUb\n",
+ xd->remote_uuid);
+ }
+ return;
+ }
+
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+
+ mutex_lock(&xd->lock);
+
+ /* Only accept newer generation properties */
+ if (xd->properties && gen <= xd->property_block_gen) {
+ /*
+ * On resume it is likely that the properties block is
+ * not changed (unless the other end added or removed
+ * services). However, we need to make sure the existing
+ * DMA paths are restored properly.
+ */
+ tb_xdomain_restore_paths(xd);
+ goto err_free_block;
+ }
+
+ dir = tb_property_parse_dir(block, ret);
+ if (!dir) {
+ dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ goto err_free_block;
+ }
+
+ ret = populate_properties(xd, dir);
+ if (ret) {
+ dev_err(&xd->dev, "missing XDomain properties in response\n");
+ goto err_free_dir;
+ }
+
+ /* Release the existing one */
+ if (xd->properties) {
+ tb_property_free_dir(xd->properties);
+ update = true;
+ }
+
+ xd->properties = dir;
+ xd->property_block_gen = gen;
+
+ tb_xdomain_restore_paths(xd);
+
+ mutex_unlock(&xd->lock);
+
+ kfree(block);
+
+ /*
+ * Now the device should be ready enough so we can add it to the
+ * bus and let userspace know about it. If the device is already
+	 * registered, we notify userspace that it has changed.
+ */
+ if (!update) {
+ if (device_add(&xd->dev)) {
+ dev_err(&xd->dev, "failed to add XDomain device\n");
+ return;
+ }
+ } else {
+ kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+ }
+
+ enumerate_services(xd);
+ return;
+
+err_free_dir:
+ tb_property_free_dir(dir);
+err_free_block:
+ kfree(block);
+ mutex_unlock(&xd->lock);
+}
+
+static void tb_xdomain_properties_changed(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ properties_changed_work.work);
+ int ret;
+
+ ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
+ xd->properties_changed_retries, xd->local_uuid);
+ if (ret) {
+ if (xd->properties_changed_retries-- > 0)
+ queue_delayed_work(xd->tb->wq,
+ &xd->properties_changed_work,
+ msecs_to_jiffies(1000));
+ return;
+ }
+
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%pUb\n", xd->remote_uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *xdomain_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_vendor_name.attr,
+ NULL,
+};
+
+static struct attribute_group xdomain_attr_group = {
+ .attrs = xdomain_attrs,
+};
+
+static const struct attribute_group *xdomain_attr_groups[] = {
+ &xdomain_attr_group,
+ NULL,
+};
+
+static void tb_xdomain_release(struct device *dev)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ put_device(xd->dev.parent);
+
+ tb_property_free_dir(xd->properties);
+ ida_destroy(&xd->service_ids);
+
+ kfree(xd->local_uuid);
+ kfree(xd->remote_uuid);
+ kfree(xd->device_name);
+ kfree(xd->vendor_name);
+ kfree(xd);
+}
+
+static void start_handshake(struct tb_xdomain *xd)
+{
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+
+ /* Start exchanging properties with the other host */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+ xd->properties_retries = 0;
+ xd->properties_changed_retries = 0;
+
+ cancel_delayed_work_sync(&xd->get_properties_work);
+ cancel_delayed_work_sync(&xd->properties_changed_work);
+}
+
+static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+{
+ stop_handshake(tb_to_xdomain(dev));
+ return 0;
+}
+
+static int __maybe_unused tb_xdomain_resume(struct device *dev)
+{
+ struct tb_xdomain *xd = tb_to_xdomain(dev);
+
+ /*
+	 * Ask tb_xdomain_get_properties() to restore any existing DMA
+	 * paths after the properties have been re-read.
+ */
+ xd->resume = true;
+ start_handshake(xd);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tb_xdomain_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
+};
+
+struct device_type tb_xdomain_type = {
+ .name = "thunderbolt_xdomain",
+ .release = tb_xdomain_release,
+ .pm = &tb_xdomain_pm_ops,
+};
+EXPORT_SYMBOL_GPL(tb_xdomain_type);
+
+/**
+ * tb_xdomain_alloc() - Allocate new XDomain object
+ * @tb: Domain where the XDomain belongs
+ * @parent: Parent device (the switch through which the connection to
+ *	    the other domain is reached).
+ * @route: Route string used to reach the other domain
+ * @local_uuid: Our local domain UUID
+ * @remote_uuid: UUID of the other domain
+ *
+ * Allocates a new XDomain structure and returns a pointer to it. The
+ * object must be released by calling tb_xdomain_put().
+ */
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid)
+{
+ struct tb_xdomain *xd;
+
+ xd = kzalloc(sizeof(*xd), GFP_KERNEL);
+ if (!xd)
+ return NULL;
+
+ xd->tb = tb;
+ xd->route = route;
+ ida_init(&xd->service_ids);
+ mutex_init(&xd->lock);
+ INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->properties_changed_work,
+ tb_xdomain_properties_changed);
+
+ xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->local_uuid)
+ goto err_free;
+
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+
+ device_initialize(&xd->dev);
+ xd->dev.parent = get_device(parent);
+ xd->dev.bus = &tb_bus_type;
+ xd->dev.type = &tb_xdomain_type;
+ xd->dev.groups = xdomain_attr_groups;
+ dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+
+ return xd;
+
+err_free_local_uuid:
+ kfree(xd->local_uuid);
+err_free:
+ kfree(xd);
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_add() - Add XDomain to the bus
+ * @xd: XDomain to add
+ *
+ * This function starts the XDomain discovery protocol handshake and
+ * eventually adds the XDomain to the bus. After calling this function
+ * the caller needs to call tb_xdomain_remove() in order to remove and
+ * release the object regardless of whether the handshake succeeded or not.
+ */
+void tb_xdomain_add(struct tb_xdomain *xd)
+{
+ /* Start exchanging properties with the other host */
+ start_handshake(xd);
+}
+
+static int unregister_service(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+/**
+ * tb_xdomain_remove() - Remove XDomain from the bus
+ * @xd: XDomain to remove
+ *
+ * This will stop all ongoing configuration work and remove the XDomain
+ * along with any services from the bus. When the last reference to @xd
+ * is released the object will be released as well.
+ */
+void tb_xdomain_remove(struct tb_xdomain *xd)
+{
+ stop_handshake(xd);
+
+ device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+
+ if (!device_is_registered(&xd->dev))
+ put_device(&xd->dev);
+ else
+ device_unregister(&xd->dev);
+}
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID of the transmit path the other end is using to
+ * send packets
+ * @transmit_ring: DMA ring used to receive packets from the other end
+ * @receive_path: HopID of the receive path the other end is using to
+ * receive packets
+ * @receive_ring: DMA ring used to send packets to the other end
+ *
+ * The function enables the DMA paths accordingly so that after a
+ * successful return the caller can send and receive packets using the
+ * high-speed DMA paths.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
+ u16 transmit_ring, u16 receive_path,
+ u16 receive_ring)
+{
+ int ret;
+
+ mutex_lock(&xd->lock);
+
+ if (xd->transmit_path) {
+ ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
+ goto exit_unlock;
+ }
+
+ xd->transmit_path = transmit_path;
+ xd->transmit_ring = transmit_ring;
+ xd->receive_path = receive_path;
+ xd->receive_ring = receive_ring;
+
+ ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+
+exit_unlock:
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After a call to
+ * this function the caller is not expected to use the rings anymore.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+{
+ int ret = 0;
+
+ mutex_lock(&xd->lock);
+ if (xd->transmit_path) {
+ xd->transmit_path = 0;
+ xd->transmit_ring = 0;
+ xd->receive_path = 0;
+ xd->receive_ring = 0;
+
+ ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
+ }
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
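+
+/*
+ * Example (illustrative only): a service driver bringing the DMA paths
+ * up once both ends have agreed on the HopIDs and rings to use, and
+ * tearing them down again when the connection is closed. The numbers
+ * below are hypothetical and come from the service specific protocol.
+ *
+ *	ret = tb_xdomain_enable_paths(xd, 8, 1, 8, 1);
+ *	if (ret)
+ *		return ret;
+ *
+ * and later:
+ *
+ *	tb_xdomain_disable_paths(xd);
+ */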
+
+struct tb_xdomain_lookup {
+ const uuid_t *uuid;
+ u8 link;
+ u8 depth;
+};
+
+static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
+ const struct tb_xdomain_lookup *lookup)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_xdomain *xd;
+
+ if (tb_is_upstream_port(port))
+ continue;
+
+ if (port->xdomain) {
+ xd = port->xdomain;
+
+ if (lookup->uuid) {
+ if (uuid_equal(xd->remote_uuid, lookup->uuid))
+ return xd;
+ } else if (lookup->link == xd->link &&
+ lookup->depth == xd->depth) {
+ return xd;
+ }
+ } else if (port->remote) {
+ xd = switch_find_xdomain(port->remote->sw, lookup);
+ if (xd)
+ return xd;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
+ * @tb: Domain the XDomain belongs to
+ * @uuid: UUID to look for
+ *
+ * Finds an XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.uuid = uuid;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ if (xd) {
+ get_device(&xd->dev);
+ return xd;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
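+
+/*
+ * Example (illustrative only): looking up an XDomain by the UUID of the
+ * remote host. The lookup walks the topology, so @tb->lock must be
+ * held, and the reference taken must be dropped with tb_xdomain_put()
+ * when done.
+ *
+ *	struct tb_xdomain *xd;
+ *
+ *	mutex_lock(&tb->lock);
+ *	xd = tb_xdomain_find_by_uuid(tb, uuid);
+ *	mutex_unlock(&tb->lock);
+ *
+ *	if (xd) {
+ *		dev_dbg(&xd->dev, "found remote %pUb\n", xd->remote_uuid);
+ *		tb_xdomain_put(xd);
+ *	}
+ */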
+
+/**
+ * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
+ * @tb: Domain the XDomain belongs to
+ * @link: Root switch link number
+ * @depth: Depth in the link
+ *
+ * Finds an XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.link = link;
+ lookup.depth = depth;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ if (xd) {
+ get_device(&xd->dev);
+ return xd;
+ }
+
+ return NULL;
+}
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ const struct tb_protocol_handler *handler, *tmp;
+ const struct tb_xdp_header *hdr = buf;
+ unsigned int length;
+ int ret = 0;
+
+	/* We expect the packet to be at least the size of the header */
+ length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+ if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+
+ /*
+ * Handle XDomain discovery protocol packets directly here. For
+ * other protocols (based on their UUID) we call registered
+ * handlers in turn.
+ */
+ if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
+ if (type == TB_CFG_PKG_XDOMAIN_REQ) {
+ tb_xdp_schedule_request(tb, hdr, size);
+ return true;
+ }
+ return false;
+ }
+
+ mutex_lock(&xdomain_lock);
+ list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
+ if (!uuid_equal(&hdr->uuid, handler->uuid))
+ continue;
+
+ mutex_unlock(&xdomain_lock);
+ ret = handler->callback(buf, size, handler->data);
+ mutex_lock(&xdomain_lock);
+
+ if (ret)
+ break;
+ }
+ mutex_unlock(&xdomain_lock);
+
+ return ret > 0;
+}
+
+static int rebuild_property_block(void)
+{
+ u32 *block, len;
+ int ret;
+
+ ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ len = ret;
+
+ block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ ret = tb_property_format_dir(xdomain_property_dir, block, len);
+ if (ret) {
+ kfree(block);
+ return ret;
+ }
+
+ kfree(xdomain_property_block);
+ xdomain_property_block = block;
+ xdomain_property_block_len = len;
+ xdomain_property_block_gen++;
+
+ return 0;
+}
+
+static int update_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+
+ xd = tb_to_xdomain(dev);
+ if (xd) {
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(50));
+ }
+
+ return 0;
+}
+
+static void update_all_xdomains(void)
+{
+ bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
+}
+
+static bool remove_directory(const char *key, const struct tb_property_dir *dir)
+{
+ struct tb_property *p;
+
+ p = tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY);
+ if (p && p->value.dir == dir) {
+ tb_property_remove(p);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tb_register_property_dir() - Register property directory to the host
+ * @key: Key (name) of the directory to add
+ * @dir: Directory to add
+ *
+ * Service drivers can use this function to add a new property directory
+ * to the properties the host exposes. The other connected hosts are
+ * notified so they can re-read the properties of this host if they are
+ * interested.
+ *
+ * Return: %0 on success and negative errno on failure
+ */
+int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret;
+
+ if (WARN_ON(!xdomain_property_dir))
+ return -EAGAIN;
+
+ if (!key || strlen(key) > 8)
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ if (tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY)) {
+ ret = -EEXIST;
+ goto err_unlock;
+ }
+
+ ret = tb_property_add_dir(xdomain_property_dir, key, dir);
+ if (ret)
+ goto err_unlock;
+
+ ret = rebuild_property_block();
+ if (ret) {
+ remove_directory(key, dir);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&xdomain_lock);
+ update_all_xdomains();
+ return 0;
+
+err_unlock:
+ mutex_unlock(&xdomain_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_register_property_dir);
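+
+/*
+ * Example (illustrative only): a service driver exposing its own
+ * property directory to remote hosts. The "mysvc" key is hypothetical
+ * and may be at most 8 characters long; the standard per-service
+ * properties match the ones parsed in populate_service() above.
+ *
+ *	struct tb_property_dir *dir;
+ *	int ret;
+ *
+ *	dir = tb_property_create_dir(NULL);
+ *	if (!dir)
+ *		return -ENOMEM;
+ *
+ *	tb_property_add_immediate(dir, "prtcid", 1);
+ *	tb_property_add_immediate(dir, "prtcvers", 1);
+ *	tb_property_add_immediate(dir, "prtcrevs", 1);
+ *	tb_property_add_immediate(dir, "prtcstns", 0);
+ *
+ *	ret = tb_register_property_dir("mysvc", dir);
+ *	if (ret) {
+ *		tb_property_free_dir(dir);
+ *		return ret;
+ *	}
+ *
+ * and tb_unregister_property_dir("mysvc", dir) when the driver unloads.
+ */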
+
+/**
+ * tb_unregister_property_dir() - Removes property directory from host
+ * @key: Key (name) of the directory
+ * @dir: Directory to remove
+ *
+ * This will remove the existing directory from this host and notify the
+ * connected hosts about the change.
+ */
+void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret = 0;
+
+ mutex_lock(&xdomain_lock);
+ if (remove_directory(key, dir))
+ ret = rebuild_property_block();
+ mutex_unlock(&xdomain_lock);
+
+ if (!ret)
+ update_all_xdomains();
+}
+EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
+
+int tb_xdomain_init(void)
+{
+ int ret;
+
+ xdomain_property_dir = tb_property_create_dir(NULL);
+ if (!xdomain_property_dir)
+ return -ENOMEM;
+
+ /*
+ * Initialize standard set of properties without any service
+ * directories. Those will be added by service drivers
+ * themselves when they are loaded.
+ */
+ tb_property_add_immediate(xdomain_property_dir, "vendorid",
+ PCI_VENDOR_ID_INTEL);
+ tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
+ tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+ tb_property_add_text(xdomain_property_dir, "deviceid",
+ utsname()->nodename);
+ tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
+
+ ret = rebuild_property_block();
+ if (ret) {
+ tb_property_free_dir(xdomain_property_dir);
+ xdomain_property_dir = NULL;
+ }
+
+ return ret;
+}
+
+void tb_xdomain_exit(void)
+{
+ kfree(xdomain_property_block);
+ tb_property_free_dir(xdomain_property_dir);
+}