Diffstat (limited to 'drivers/thunderbolt/nhi.c')
-rw-r--r--  drivers/thunderbolt/nhi.c  104
1 file changed, 67 insertions(+), 37 deletions(-)
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cb8c9c4ae93a..4dce2edd86ea 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -28,7 +28,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
-
+/*
+ * Used with QUIRK_E2E to specify an unused HopID to which the Rx
+ * credits are transferred.
+ */
+#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
@@ -38,7 +42,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
+#define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
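Both quirk bits end up in the quirks field of struct tb_nhi; they are set once from nhi_check_quirks() at probe time, and everywhere else the driver only tests them with a bitwise AND. A minimal sketch of that pattern, with an illustrative helper name that is not part of nhi.c:

/* Illustrative only: quirk bits are ORed into nhi->quirks once at
 * probe time and tested with a simple mask check afterwards. */
static inline bool nhi_has_quirk(const struct tb_nhi *nhi, unsigned int quirk)
{
        return !!(nhi->quirks & quirk);
}

In the driver itself the test stays inline, for example nhi->quirks & QUIRK_E2E in nhi_alloc_hop() below.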
@@ -458,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+ unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
+ if (nhi->quirks & QUIRK_E2E) {
+ start_hop = RING_FIRST_USABLE_HOPID + 1;
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+ ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+ ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+ }
+ }
+
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -469,7 +485,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
* Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1.
*/
- for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
@@ -484,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
}
}
+ if (ring->hop > 0 && ring->hop < start_hop) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
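Taken together, the nhi_alloc_hop() changes above mean that on a quirked controller the automatic HopID search starts from 2 instead of 1, and an explicitly requested HopID below that limit is refused, so RING_E2E_RESERVED_HOPID stays free for the transferred Rx credits. A simplified model of that policy, written as if it lived in nhi.c but with the locking and ring bookkeeping left out (both function names are illustrative):

/* Simplified, illustrative model of the HopID policy above. */
static int pick_start_hop(const struct tb_nhi *nhi)
{
        /* Keep RING_E2E_RESERVED_HOPID (1) unused on quirked hardware. */
        if (nhi->quirks & QUIRK_E2E)
                return RING_FIRST_USABLE_HOPID + 1;
        return RING_FIRST_USABLE_HOPID;
}

static bool fixed_hop_is_valid(const struct tb_nhi *nhi, int hop)
{
        /* HopID 0 (the control channel) may still be requested explicitly. */
        if (hop == 0)
                return true;
        return hop >= pick_start_hop(nhi) && hop < nhi->hop_count;
}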
@@ -1097,12 +1118,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi)
{
- /*
- * Intel hardware supports auto clear of the interrupt status
- * reqister right after interrupt is being issued.
- */
- if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ /*
+ * Intel hardware supports auto clear of the interrupt
+ * status register right after interrupt is being
+ * issued.
+ */
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ /*
+ * Falcon Ridge controller needs the end-to-end
+ * flow control workaround to avoid losing Rx
+ * packets when RING_FLAG_E2E is set.
+ */
+ nhi->quirks |= QUIRK_E2E;
+ break;
+ }
+ }
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
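For context, the rings that reach this quirked path are the ones a client such as the Thunderbolt networking driver allocates with RING_FLAG_E2E. The sketch below shows such an allocation; the tb_ring_alloc_rx() prototype is quoted from include/linux/thunderbolt.h as best recalled here and should be checked against the header, and the ring size, SOF/EOF masks and poll callback are placeholders rather than values taken from this patch:

#include <linux/thunderbolt.h>

static void example_start_poll(void *data)
{
        /* A real client would kick its NAPI/poll machinery here. */
}

/* Illustrative: allocate an Rx ring that uses end-to-end flow control.
 * Rx credits are returned to the peer through tx_hop; on a QUIRK_E2E
 * controller nhi_alloc_hop() redirects that to RING_E2E_RESERVED_HOPID. */
static struct tb_ring *example_alloc_e2e_rx(struct tb_nhi *nhi, int tx_hop)
{
        return tb_ring_alloc_rx(nhi, -1 /* auto HopID */, 256,
                                RING_FLAG_FRAME | RING_FLAG_E2E, tx_hop,
                                0xffff, 0xffff, example_start_poll, NULL);
}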
@@ -1149,6 +1184,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
+ struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
@@ -1179,10 +1215,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
- if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
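The error-path conversions in this and the following hunks all have the same shape: dev_err_probe() logs the message (demoted to debug and recorded as the deferral reason when the error is -EPROBE_DEFER) and returns the error code unchanged, so every failure branch collapses to a single return statement. A self-contained sketch of the pattern around a hypothetical wrapper:

#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical helper demonstrating the dev_err_probe() pattern. */
static int example_request_irq(struct device *dev, unsigned int irq,
                               irq_handler_t handler, void *data)
{
        int res = devm_request_irq(dev, irq, handler, IRQF_NO_SUSPEND,
                                   "example", data);

        /* dev_err_probe() prints the message and hands res straight back. */
        if (res)
                return dev_err_probe(dev, res, "request_irq failed, aborting\n");

        return 0;
}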
@@ -1223,26 +1257,21 @@ static struct tb *nhi_select_cm(struct tb_nhi *nhi)
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
- if (!nhi_imr_valid(pdev)) {
- dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
- return -ENODEV;
- }
+ if (!nhi_imr_valid(pdev))
+ return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
res = pcim_enable_device(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res) {
- dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
@@ -1253,7 +1282,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
- dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
+ dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
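These early returns need no unwinding because the probe path only uses managed resources: pcim_enable_device(), pcim_iomap_regions(), devm_kzalloc()/devm_kcalloc() and devm_request_irq() are all released automatically when probe fails or the device is removed. A minimal, hypothetical probe skeleton showing that shape:

#include <linux/pci.h>

struct example_priv {
        void __iomem *iobase;
};

/* Hypothetical probe skeleton: with pcim_/devm_ managed resources a
 * plain return is enough on every failure path. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct example_priv *priv;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return dev_err_probe(&pdev->dev, ret,
                                     "cannot enable PCI device, aborting\n");

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM; /* nothing to unwind */

        pci_set_drvdata(pdev, priv);
        return 0;
}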
@@ -1266,18 +1295,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (res) {
- dev_err(&pdev->dev, "failed to set DMA mask\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
@@ -1288,13 +1313,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
tb = nhi_select_cm(nhi);
- if (!tb) {
- dev_err(&nhi->pdev->dev,
+ if (!tb)
+ return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
- return -ENODEV;
- }
- dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
@@ -1398,6 +1421,7 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Thunderbolt 4 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
@@ -1414,6 +1438,12 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
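The new Meteor Lake rows use the PCI_VDEVICE() helper, which fills in the Intel vendor ID and the given device ID and wildcards the subsystem IDs, while the final class-based entry catches any other USB4 compliant host controller. Written out longhand, one of the new rows is roughly the initializer below (illustrative; the macro body is paraphrased from include/linux/pci.h and the device ID constant lives in drivers/thunderbolt/nhi.h):

/* PCI_VDEVICE(vend, dev), paraphrased from include/linux/pci.h:
 *      .vendor = PCI_VENDOR_ID_##vend, .device = (dev),
 *      .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 *
 * so the PCI_DEVICE_ID_INTEL_MTL_M_NHI0 row is roughly equivalent to: */
{ .vendor = PCI_VENDOR_ID_INTEL,
  .device = PCI_DEVICE_ID_INTEL_MTL_M_NHI0,
  .subvendor = PCI_ANY_ID,
  .subdevice = PCI_ANY_ID,
  .driver_data = (kernel_ulong_t)&icl_nhi_ops },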