author		Bjorn Helgaas <bhelgaas@google.com>	2021-02-24 21:59:23 +0100
committer	Bjorn Helgaas <bhelgaas@google.com>	2021-02-24 21:59:23 +0100
commit		2ef38d7e2b01d5668c1e607ef49d866b40403806 (patch)
tree		b7afc3b15e81f5a2e48b835a64dc7aa4f4e902ea /drivers/pci
parent		Merge branch 'pci/microchip' (diff)
parent		Documentation: PCI: Add PCI endpoint NTB function user guide (diff)
Merge branch 'pci/ntb'
- Account for 64-bit BARs in pci_epc_get_first_free_bar() (Kishon Vijay Abraham I)
- Add pci_epc_get_next_free_bar() helper (Kishon Vijay Abraham I)
- Return error codes on failure of endpoint BAR interfaces (Kishon Vijay Abraham I)
- Remove unused pci_epf_match_device() (Kishon Vijay Abraham I)
- Add support for secondary endpoint controller to prepare for NTB endpoint functionality (Kishon Vijay Abraham I)
- Add configfs support for secondary endpoint controller (Kishon Vijay Abraham I)
- Add MSI address mapping ops for NTB doorbell support (Kishon Vijay Abraham I)
- Add ops for endpoint function-specific attributes (Kishon Vijay Abraham I)
- Allow configfs subdirectory for endpoint function configuration (Kishon Vijay Abraham I)
- Implement cadence MSI address mapping ops (Kishon Vijay Abraham I)
- Configure cadence LM_EP_FUNC_CFG based on epc->function_num_map (Kishon Vijay Abraham I)
- Add endpoint-side driver to provide NTB functionality (Kishon Vijay Abraham I)
- Add host-side driver for generic EPF NTB functionality (Kishon Vijay Abraham I)
- Document NTB endpoint functionality (Kishon Vijay Abraham I)

* pci/ntb:
  Documentation: PCI: Add PCI endpoint NTB function user guide
  Documentation: PCI: Add configfs binding documentation for pci-ntb endpoint function
  NTB: Add support for EPF PCI Non-Transparent Bridge
  PCI: Add TI J721E device to PCI IDs
  PCI: endpoint: Add EP function driver to provide NTB functionality
  PCI: cadence: Configure LM_EP_FUNC_CFG based on epc->function_num_map
  PCI: cadence: Implement ->msi_map_irq() ops
  PCI: endpoint: Allow user to create sub-directory of 'EPF Device' directory
  PCI: endpoint: Add pci_epf_ops to expose function-specific attrs
  PCI: endpoint: Add pci_epc_ops to map MSI IRQ
  PCI: endpoint: Add support in configfs to associate two EPCs with EPF
  PCI: endpoint: Add support to associate secondary EPC with EPF
  PCI: endpoint: Remove unused pci_epf_match_device()
  PCI: endpoint: Make *_free_bar() to return error codes on failure
  PCI: endpoint: Add helper API to get the 'next' unreserved BAR
  PCI: endpoint: Make *_get_first_free_bar() take into account 64 bit BAR
  Documentation: PCI: Add specification for the PCI NTB function device
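The doorbell scheme added here lets one host ring an interrupt on the other by writing an advertised data value into a mapped doorbell entry; the endpoint has already redirected those entries to the peer host's MSI/MSI-X address (see the epf_ntb_configure_msi()/epf_ntb_configure_msix() comments in pci-epf-ntb.c below). A minimal sketch of the host-side write, assuming hypothetical names (sketch_ring_doorbell, db_base) and the db_entry_size/db_data/db_offset values advertised through the NTB config region; it illustrates the mechanism only and is not the code added by this series:

	#include <linux/io.h>
	#include <linux/types.h>

	/*
	 * Hedged sketch, not the driver's actual code: db_base is the
	 * ioremapped doorbell region of the peer's DB/MW BAR; db_entry_size,
	 * db_data[] and db_offset[] are read from the endpoint's config
	 * region (struct epf_ntb_ctrl).
	 */
	static void sketch_ring_doorbell(void __iomem *db_base, u32 db_entry_size,
					 const u32 *db_data, const u32 *db_offset,
					 int db)
	{
		/*
		 * Writing the advertised data at the advertised offset of
		 * doorbell entry 'db' triggers an MSI/MSI-X on the peer host.
		 */
		writel(db_data[db], db_base + db * db_entry_size + db_offset[db]);
	}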
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/controller/cadence/pcie-cadence-ep.c	60
-rw-r--r--	drivers/pci/endpoint/functions/Kconfig	13
-rw-r--r--	drivers/pci/endpoint/functions/Makefile	1
-rw-r--r--	drivers/pci/endpoint/functions/pci-epf-ntb.c	2128
-rw-r--r--	drivers/pci/endpoint/functions/pci-epf-test.c	13
-rw-r--r--	drivers/pci/endpoint/pci-ep-cfs.c	176
-rw-r--r--	drivers/pci/endpoint/pci-epc-core.c	130
-rw-r--r--	drivers/pci/endpoint/pci-epf-core.c	105
8 files changed, 2562 insertions, 64 deletions
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 9e2b024d32f2..897cdde02bd8 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -382,6 +382,57 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ struct cdns_pcie *pcie = &ep->pcie;
+ u64 pci_addr, pci_addr_mask = 0xff;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ int ret;
+ int i;
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = data & ~data_mask;
+
+ /* Get the PCI address where to write the data into. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
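+	/*
+	 * Map one outbound window of 'entry_size' bytes per MSI vector, all
+	 * targeting the 256-byte aligned region that contains the MSI
+	 * address; the offset within that region is returned separately in
+	 * *msi_addr_offset.
+	 */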
+ for (i = 0; i < interrupt_num; i++) {
+ ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ pci_addr & ~pci_addr_mask,
+ entry_size);
+ if (ret)
+ return ret;
+ addr = addr + entry_size;
+ }
+
+ *msi_data = data;
+ *msi_addr_offset = pci_addr & pci_addr_mask;
+
+ return 0;
+}
+
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
u16 interrupt_num)
{
@@ -455,18 +506,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- struct pci_epf *epf;
- u32 cfg;
int ret;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
- cfg = BIT(0);
- list_for_each_entry(epf, &epc->pci_epf, list)
- cfg |= BIT(epf->func_no);
- cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
ret = cdns_pcie_start_link(pcie);
if (ret) {
@@ -481,6 +527,7 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
+ .align = 256,
};
static const struct pci_epc_features*
@@ -500,6 +547,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.set_msix = cdns_pcie_ep_set_msix,
.get_msix = cdns_pcie_ep_get_msix,
.raise_irq = cdns_pcie_ep_raise_irq,
+ .map_msi_irq = cdns_pcie_ep_map_msi_irq,
.start = cdns_pcie_ep_start,
.get_features = cdns_pcie_ep_get_features,
};
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 8820d0f7ec77..5f1242ca2f4e 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -12,3 +12,16 @@ config PCI_EPF_TEST
for PCI Endpoint.
If in doubt, say "N" to disable Endpoint test driver.
+
+config PCI_EPF_NTB
+ tristate "PCI Endpoint NTB driver"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCI Endpoint. NTB driver implements NTB
+ controller functionality using multiple PCIe endpoint instances.
+ It can support NTB endpoint function devices created using
+ device tree.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index d6fafff080e2..96ab932a537a 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
+obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
new file mode 100644
index 000000000000..338148cf56f5
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -0,0 +1,2128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * The PCI NTB function driver configures the SoC with multiple PCIe Endpoint
+ * (EP) controller instances (see diagram below) in such a way that
+ * transactions from one EP controller are routed to the other EP controller.
+ * Once PCI NTB function driver configures the SoC with multiple EP instances,
+ * HOST1 and HOST2 can communicate with each other using SoC as a bridge.
+ *
+ * +-------------+ +-------------+
+ * | | | |
+ * | HOST1 | | HOST2 |
+ * | | | |
+ * +------^------+ +------^------+
+ * | |
+ * | |
+ * +---------|-------------------------------------------------|---------+
+ * | +------v------+ +------v------+ |
+ * | | | | | |
+ * | | EP | | EP | |
+ * | | CONTROLLER1 | | CONTROLLER2 | |
+ * | | <-----------------------------------> | |
+ * | | | | | |
+ * | | | | | |
+ * | | | SoC With Multiple EP Instances | | |
+ * | | | (Configured using NTB Function) | | |
+ * | +-------------+ +-------------+ |
+ * +---------------------------------------------------------------------+
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+};
+
+struct epf_ntb {
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ struct pci_epf *epf;
+ u64 mws_size[MAX_MW];
+ struct config_group group;
+ struct epf_ntb_epc *epc[2];
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+
+struct epf_ntb_epc {
+ u8 func_no;
+ bool linkup;
+ bool is_msix;
+ int msix_bar;
+ u32 spad_size;
+ struct pci_epc *epc;
+ struct epf_ntb *epf_ntb;
+ void __iomem *mw_addr[6];
+ size_t msix_table_offset;
+ struct epf_ntb_ctrl *reg;
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno epf_ntb_bar[6];
+ struct delayed_work cmd_handler;
+ enum pci_epc_interface_type type;
+ const struct pci_epc_features *epc_features;
+};
+
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 mw1_offset;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to both the hosts
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB client in HOST1 and the NTB client in HOST2 invoke
+ * ntb_link_enable(), this NTB function driver will trigger a link event to
+ * the NTB client in both hosts.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ enum pci_epc_interface_type type;
+ enum pci_epc_irq_type irq_type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ struct pci_epc *epc;
+ bool is_msix;
+ u8 func_no;
+ int ret;
+
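+	/*
+	 * Update the link status in the config region of both interfaces and
+	 * notify each host with an MSI/MSI-X on interrupt number 1.
+	 */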
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ is_msix = ntb_epc->is_msix;
+ ctrl = ntb_epc->reg;
+ if (link_up)
+ ctrl->link_status |= LINK_STATUS_UP;
+ else
+ ctrl->link_status &= ~LINK_STATUS_UP;
+ irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to raise Link Up IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
+ * to access the memory window of other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see MW1) i.e., map OB
+ * address space of memory window to PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address in the outbound address space
+ * 2) Address in the PCI Address space
+ * 3) Size of the address region to be mapped
+ *
+ * The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
+ * stored in epf_bar corresponding to BAR_DB_MW1 for MW1 and BAR_MW2, BAR_MW3
+ * BAR_MW4 for rest of the BARs of epf_ntb_epc that is connected to HOST1. This
+ * is populated in epf_ntb_alloc_peer_mem() in this driver.
+ *
+ * The address and size of the PCI address region that has to be mapped would
+ * be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
+ * connected to HOST2.
+ *
+ * Please note Memory window1 (MW1) and Doorbell registers together will be
+ * mapped to a single BAR (BAR2) above for 32-bit BARs. The exact BAR that's
+ * used for Memory window (MW) can be obtained from epf_ntb_bar[BAR_DB_MW1],
+ * epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3] and epf_ntb_bar[BAR_MW4].
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u64 addr, size;
+ int ret = 0;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ addr = ctrl->addr;
+ size = ctrl->size;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+
+ if (size > ntb->mws_size[mw]) {
+ dev_err(&epc->dev,
+			"%s intf: MW: %d Req Sz:%llx > Supported Sz:%llx\n",
+ pci_epc_interface_string(type), mw, size,
+ ntb->mws_size[mw]);
+ ret = -EINVAL;
+ goto err_invalid_size;
+ }
+
+ func_no = ntb_epc->func_no;
+
+ ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&epc->dev,
+ "%s intf: Failed to map memory window %d address\n",
+ pci_epc_interface_string(type), mw);
+
+err_invalid_size:
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+ func_no = ntb_epc->func_no;
+
+ pci_epc_unmap_addr(epc, func_no, phys_addr);
+}
+
+/**
+ * epf_ntb_configure_msi() - Map OB address space to MSI address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +---+-------> MSI ADDRESS |
+ *+-----------------+ | +----------------+ | +-----------------+
+ *| BAR1 | | | Doorbell 2 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR3 | | | Doorbell 4 +---+ | |
+ *+-----------------+ | |----------------+ | |
+ *| BAR4 | | | | | |
+ *+-----------------+ | | MW1 | | |
+ *| BAR5 | | | | | |
+ *+-----------------+ +----->-----------------+ | |
+ * EP CONTROLLER 1 | | | |
+ * | | | |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4) i.e map OB address space corresponding to
+ * doorbell to MSI address in PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI address in the PCI address space
+ * 3) Number of MSI interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * pci_epc_map_msi_irq() takes the MSI address from MSI capability register
+ * and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI
+ * address.
+ *
+ * epf_ntb_configure_msi() also stores the MSI data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msi(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u16 db_count)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ u32 db_entry_size, db_data, db_offset;
+ struct pci_epf_bar *peer_epf_bar;
+ struct epf_ntb_ctrl *peer_ctrl;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ peer_ctrl = peer_ntb_epc->reg;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+
+ ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ db_entry_size, &db_data, &db_offset);
+ if (ret) {
+ dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
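+	/*
+	 * Record, in the peer's control region, the offset and MSI data value
+	 * for each doorbell entry; the peer rings doorbell 'i' by writing
+	 * db_data[i] at db_offset[i] within the corresponding entry.
+	 */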
+ for (i = 0; i < db_count; i++) {
+ peer_ctrl->db_data[i] = db_data | i;
+ peer_ctrl->db_offset[i] = db_offset;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_msix() - Map OB address space to MSI-X address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
+ *+-----------------+ | +----------------+ +-----------------+
+ *| BAR1 | | | Doorbell 2 +---------+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ *+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
+ *| BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ *+-----------------+ | |----------------+ | | | |
+ *| BAR4 | | | | | | +-----------------+
+ *+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3||
+ *| BAR5 | | | | | +-----------------+
+ *+-----------------+ +----->-----------------+ | | |
+ * EP CONTROLLER 1 | | | +-----------------+
+ * | | +---->+ MSI-X ADDRESS 4 |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4) i.e map OB address space corresponding to
+ * doorbell to MSI-X address in PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI-X address in the PCIe Address space
+ * 3) Number of MSI-X interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and the doorbell
+ * count is in ctrl->argument of the epf_ntb_epc that is connected to HOST2.
+ * The MSI-X table is memory mapped via the BAR in ntb_epc->msix_bar at the
+ * offset given by ntb_epc->msix_table_offset. From this table,
+ * epf_ntb_configure_msix() gets the MSI-X address and data.
+ *
+ * epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msix(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct epf_ntb_ctrl *peer_ctrl;
+ u32 db_entry_size, msg_data;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ size_t align;
+ u64 msg_addr;
+ u8 func_no;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar];
+ msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ peer_ctrl = peer_ntb_epc->reg;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ func_no = ntb_epc->func_no;
+ db_entry_size = peer_ctrl->db_entry_size;
+
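+	/*
+	 * Map one db_entry_size outbound window per doorbell, each targeting
+	 * the (align-rounded) MSI-X message address of the corresponding
+	 * vector, and advertise the per-doorbell offset and message data in
+	 * the peer's control region.
+	 */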
+ for (i = 0; i < db_count; i++) {
+ msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
+ msg_data = msix_tbl[i].msg_data;
+ ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ db_entry_size);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to configure MSI-X IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ phys_addr = phys_addr + db_entry_size;
+ peer_ctrl->db_data[i] = msg_data;
+ peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1);
+ }
+ ntb_epc->is_msix = true;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_db() - Configure the Outbound Address Space for one host
+ * to ring the doorbell of other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbells that have to be configured
+ * @msix: Indicates whether MSI-X or MSI should be used
+ *
+ * Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi() required for
+ * one HOST to ring the doorbell of other HOST.
+ */
+static int epf_ntb_configure_db(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count, bool msix)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ int ret;
+
+ if (db_count > MAX_DB_COUNT)
+ return -EINVAL;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ if (msix)
+ ret = epf_ntb_configure_msix(ntb, type, db_count);
+ else
+ ret = epf_ntb_configure_msi(ntb, type, db_count);
+
+ if (ret)
+ dev_err(&epc->dev, "%s intf: Failed to configure DB\n",
+ pci_epc_interface_string(type));
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_db() - Unmap address in OB address space to MSI/MSI-X
+ * address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Invoke pci_epc_unmap_addr() to unmap OB address to MSI/MSI-X address.
+ */
+static void
+epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+
+ pci_epc_unmap_addr(epc, func_no, phys_addr);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY)
+ *
+ * Workqueue function that gets invoked for the two epf_ntb_epc
+ * periodically (once every 5ms) to see if it has received any commands
+ * from NTB host. The host can send commands to configure doorbell or
+ * configure memory window or to update link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ u16 db_count;
+ bool is_msix;
+ int ret;
+
+ ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
+ ctrl = ntb_epc->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
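+	/*
+	 * Clear the command and argument registers so that the same command
+	 * is not handled again on the next poll; the result is reported back
+	 * through ctrl->command_status.
+	 */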
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ ctrl = ntb_epc->reg;
+ type = ntb_epc->type;
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ db_count = argument & DB_COUNT_MASK;
+ is_msix = argument & MSIX_ENABLE;
+ ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ epf_ntb_teardown_db(ntb, type);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, type, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, type, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb_epc->linkup = true;
+ if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
+ ntb->epc[SECONDARY_INTERFACE]->linkup) {
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ }
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_DOWN:
+ ntb_epc->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "%s intf UNKNOWN command: %d\n",
+ pci_epc_interface_string(type), command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
+ msecs_to_jiffies(5));
+}
+
+/**
+ * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs whose peer scratchpad BAR is
+ * to be cleared
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+}
+
+/**
+ * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Set BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ enum pci_barno peer_barno, barno;
+ u32 peer_spad_offset;
+ struct pci_epc *epc;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ func_no = ntb_epc->func_no;
+ epc = ntb_epc->epc;
+
+ peer_spad_offset = peer_ntb_epc->reg->spad_offset;
+ epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
+ epf_bar->size = peer_ntb_epc->spad_size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs whose config + self
+ * scratchpad BAR is to be cleared
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note HOST2's peer
+ * scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs whose config + self
+ * scratchpad BAR is to be set
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region. While BAR0 is the default self scratchpad BAR, an
+ * NTB could have other BARs for self scratchpad (because of reserved BARs).
+ * This function can get the exact BAR used for self scratchpad from
+ * epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note HOST2's peer
+ * scratchpad is HOST1's self scratchpad.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ struct epf_ntb *ntb;
+ struct pci_epc *epc;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+		dev_err(dev, "%s intf: Config/Status/SPAD BAR set failed\n",
+ pci_epc_interface_string(ntb_epc->type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Free the Local Memory mentioned in the above diagram. After invoking this
+ * function, any of config + self scratchpad region of HOST1 or peer scratchpad
+ * region of HOST2 should not be accessed.
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ struct pci_epf *epf;
+
+ epf = ntb->epf;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ if (ntb_epc->reg)
+ pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+ }
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ *
+ * The size of both config region and scratchpad region has to be aligned,
+ * since the scratchpad region will also be mapped as the PEER SCRATCHPAD of
+ * the other host using a separate BAR.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *peer_epc_features, *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ size_t msix_table_size, pba_size, align;
+ enum pci_barno peer_barno, barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size, peer_size;
+ struct pci_epf *epf;
+ struct device *dev;
+ bool msix_capable;
+ u32 spad_count;
+ void *base;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ ntb_epc = ntb->epc[type];
+
+ epc_features = ntb_epc->epc_features;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_epc_features = peer_ntb_epc->epc_features;
+ peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+
+ /* Check if epc_features is populated incorrectly */
+ if ((!IS_ALIGNED(size, align)))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = spad_count * 4;
+
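+	/*
+	 * For MSI-X capable controllers, the MSI-X table and PBA are carved
+	 * out of the same BAR, right after the control structure, so account
+	 * for them in the control region size.
+	 */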
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count;
+ ctrl_size = ALIGN(ctrl_size, 8);
+ ntb_epc->msix_table_offset = ctrl_size;
+ ntb_epc->msix_bar = barno;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8);
+ ctrl_size = ctrl_size + msix_table_size + pba_size;
+ }
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
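+	/*
+	 * If the peer controller has a fixed-size BAR for the peer
+	 * scratchpad, use that size for the scratchpad region and shrink the
+	 * scratchpad count if it does not fit.
+	 */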
+ if (peer_size) {
+ if (peer_size < spad_size)
+ spad_count = peer_size / 4;
+ spad_size = peer_size;
+ }
+
+ /*
+ * In order to make sure SPAD offset is aligned to its size,
+ * expand control region size to the size of SPAD if SPAD size
+ * is greater than control region size.
+ */
+ if (spad_size > ctrl_size)
+ ctrl_size = spad_size;
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, type);
+ if (!base) {
+ dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
+ pci_epc_interface_string(type));
+ return -ENOMEM;
+ }
+
+ ntb_epc->reg = base;
+
+ ctrl = ntb_epc->reg;
+ ctrl->spad_offset = ctrl_size;
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
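+	/*
+	 * Each doorbell entry spans the controller's address alignment
+	 * (4 bytes if the controller imposes no alignment requirement).
+	 */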
+ ctrl->db_entry_size = align ? align : 4;
+ ntb_epc->spad_size = spad_size;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config +
+ * scratchpad region for each of PRIMARY and SECONDARY interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_config_spad_bar_alloc() which allocates memory for
+ * config + scratchpad region for a specific interface
+ */
+static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_config_spad_bar_alloc(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_free_peer_mem() - Free memory allocated in peers outbound address
+ * space
+ * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
+ * outbound address regions
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram.
+ * It'll free Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3,
+ * MW4).
+ */
+static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ phys_addr_t phys_addr;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ size_t size;
+
+ epc = ntb_epc->epc;
+
+	for (bar = BAR_DB_MW1; bar <= BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ mw_addr = ntb_epc->mw_addr[barno];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ phys_addr = epf_bar->phys_addr;
+ size = epf_bar->size;
+ if (mw_addr) {
+ pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
+ ntb_epc->mw_addr[barno] = NULL;
+ }
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_clear() - Clear doorbell and memory BAR
+ * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
+ * outbound address
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Clear doorbell and memory BARs (remove inbound ATU configuration). In the above
+ * diagram it clears BAR2 to BAR5 of EP CONTROLLER 1 (Doorbell BAR, MW1 BAR, MW2
+ * BAR, MW3 BAR and MW4 BAR).
+ */
+static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ u8 func_no;
+
+ epc = ntb_epc->epc;
+
+ func_no = ntb_epc->func_no;
+
+	for (bar = BAR_DB_MW1; bar <= BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, epf_bar);
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory BAR and free memory
+ * allocated in peers outbound address space
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_db_mw_bar_clear() to clear HOST1's BAR and
+ * epf_ntb_free_peer_mem() which frees up HOST2 outbound memory.
+ */
+static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ epf_ntb_db_mw_bar_clear(ntb_epc);
+ epf_ntb_free_peer_mem(peer_ntb_epc);
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ bool msix_capable, msi_capable;
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct device *dev;
+ u32 db_count;
+ u8 func_no;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ dev = &ntb->epf->dev;
+
+ epc_features = ntb_epc->epc_features;
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+
+ if (!(msix_capable || msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ func_no = ntb_epc->func_no;
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+ epc = ntb_epc->epc;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, func_no, db_count);
+ if (ret) {
+ dev_err(dev, "%s intf: MSI configuration failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, func_no, db_count,
+ ntb_epc->msix_bar,
+ ntb_epc->msix_table_offset);
+ if (ret) {
+			dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
+ * @dev: The PCI EPF device, used for error logging
+ * @ntb_epc: EPC associated with one of the HOSTs whose BAR holds peer's outbound
+ * address
+ * @bar: BAR of @ntb_epc for which memory has to be allocated (could be
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4)
+ * @peer_ntb_epc: EPC associated with HOST whose outbound address space is
+ * used by @ntb_epc
+ * @size: Size of the address region that has to be allocated in peer's OB SPACE
+ *
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Allocate memory in OB space of EP CONTROLLER 2 in the above diagram. Allocate
+ * for Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3, MW4).
+ */
+static int epf_ntb_alloc_peer_mem(struct device *dev,
+ struct epf_ntb_epc *ntb_epc,
+ enum epf_ntb_bar bar,
+ struct epf_ntb_epc *peer_ntb_epc,
+ size_t size)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *peer_epc;
+ phys_addr_t phys_addr;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t align;
+
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ peer_epc = peer_ntb_epc->epc;
+ mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
+ if (!mw_addr) {
+ dev_err(dev, "%s intf: Failed to allocate OB address\n",
+ pci_epc_interface_string(peer_ntb_epc->type));
+ return -ENOMEM;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ ntb_epc->mw_addr[barno] = mw_addr;
+
+ epf_bar->phys_addr = phys_addr;
+ epf_bar->size = size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates
+ * memory in OB address space of HOST2 and configures BAR of HOST1
+ */
+static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_ctrl *ctrl;
+ u32 num_mws, db_count;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ struct device *dev;
+ size_t align;
+ int ret, i;
+ u8 func_no;
+ u64 size;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+ func_no = ntb_epc->func_no;
+ epc = ntb_epc->epc;
+ num_mws = ntb->num_mws;
+ db_count = ntb->db_count;
+
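+	/*
+	 * The doorbell region and the first memory window share a single BAR:
+	 * the doorbell entries come first and MW1 follows at mw1_offset,
+	 * which is aligned to the memory window size.
+	 */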
+ for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
+ if (bar == BAR_DB_MW1) {
+ align = align ? align : 4;
+ size = db_count * align;
+ size = ALIGN(size, ntb->mws_size[i]);
+ ctrl = ntb_epc->reg;
+ ctrl->mw1_offset = size;
+ size += ntb->mws_size[i];
+ } else {
+ size = ntb->mws_size[i];
+ }
+
+ ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
+ peer_ntb_epc, size);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell mem alloc failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell BAR set failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+ }
+
+ return 0;
+
+err_alloc_peer_mem:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Unbind NTB function device from EPC and relinquish reference to pci_epc
+ * for each of the interface.
+ */
+static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ if (type < 0)
+ return;
+
+ epf = ntb->epf;
+ ntb_epc = ntb->epc[type];
+ if (!ntb_epc)
+ return;
+ epc = ntb_epc->epc;
+ pci_epc_remove_epf(epc, epf, type);
+ pci_epc_put(epc);
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_destroy_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @epc: struct pci_epc to which a particular NTB interface should be associated
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Allocate memory for NTB EPC interface and initialize it.
+ */
+static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
+ struct pci_epc *epc,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ u8 func_no;
+
+ dev = &ntb->epf->dev;
+
+ ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
+ if (!ntb_epc)
+ return -ENOMEM;
+
+ epf = ntb->epf;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ epf_bar = epf->bar;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ ntb_epc->linkup = false;
+ ntb_epc->epc = epc;
+ ntb_epc->func_no = func_no;
+ ntb_epc->type = type;
+ ntb_epc->epf_bar = epf_bar;
+ ntb_epc->epf_ntb = ntb;
+
+ epc_features = pci_epc_get_features(epc, func_no);
+ if (!epc_features)
+ return -EINVAL;
+ ntb_epc->epc_features = epc_features;
+
+ ntb->epc[type] = ntb_epc;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_create() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Get a reference to EPC device and bind NTB function device to that EPC
+ * for each of the interface. It is also a wrapper to
+ * epf_ntb_epc_create_interface() to allocate memory for NTB EPC interface
+ * and initialize it
+ */
+static int epf_ntb_epc_create(struct epf_ntb *ntb)
+{
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE);
+ if (ret) {
+ dev_err(dev, "PRIMARY intf: Fail to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
+ SECONDARY_INTERFACE);
+ if (ret) {
+		dev_err(dev, "SECONDARY intf: Failed to create NTB EPC\n");
+ goto err_epc_create;
+ }
+
+ return 0;
+
+err_epc_create:
+ epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of
+ * the NTB constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD,
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4.
+ */
+static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ ntb_epc = ntb->epc[type];
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+
+	/* These BARs are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+			dev_err(dev, "%s intf: Failed to get NTB function BAR\n",
+ pci_epc_interface_string(type));
+ return barno;
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+	/* These BARs are optional; missing ones only reduce the number of MWs */
+ for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+			dev_dbg(dev, "BAR not available for MW%d and above\n", i + 1);
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
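+
+/*
+ * Worked example (illustrative only, not derived from a specific controller):
+ * assume epc_features reserves only BAR_1, has no fixed 64-bit BARs, and the
+ * user configured num_mws = 4.  The loops above would then pick:
+ *
+ *	BAR_CONFIG    -> BAR_0
+ *	BAR_PEER_SPAD -> BAR_2	(BAR_1 is reserved and skipped)
+ *	BAR_DB_MW1    -> BAR_3
+ *	BAR_MW2       -> BAR_4
+ *	BAR_MW3       -> BAR_5
+ *	BAR_MW4       -> no free BAR left, so num_mws is trimmed to 3
+ */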
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs
+ * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for all the interfaces.
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_init_epc_bar_interface(ntb, type);
+ if (ret) {
+			dev_err(dev, "Failed to init EPC BAR for %s interface\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init_interface() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from the host. This function writes to the
+ * endpoint controller hardware to configure it.
+ */
+static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ u8 func_no;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ ret = epf_ntb_peer_spad_bar_set(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Interrupt configuration failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_db_mw_bar_init(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: DB/MW BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_db_mw_bar_init;
+ }
+
+ ret = pci_epc_write_header(epc, func_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
+
+ INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+err_db_mw_bar_init:
+ epf_ntb_peer_spad_bar_clear(ntb->epc[type]);
+
+err_peer_spad_bar_init:
+ epf_ntb_config_sspad_bar_clear(ntb->epc[type]);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup_interface() - Cleanup NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to clean up a particular NTB interface.
+ */
+static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+
+ if (type < 0)
+ return;
+
+ ntb_epc = ntb->epc[type];
+ cancel_delayed_work(&ntb_epc->cmd_handler);
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+ epf_ntb_peer_spad_bar_clear(ntb_epc);
+ epf_ntb_config_sspad_bar_clear(ntb_epc);
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to clean up all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_cleanup_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to initialize all NTB interfaces and start the workqueue
+ * to check for commands from the host.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_epc_init_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Failed to initialize\n",
+ pci_epc_interface_string(type));
+ goto err_init_type;
+ }
+ }
+
+ return 0;
+
+err_init_type:
+ epf_ntb_epc_cleanup_interface(ntb, type - 1);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize both endpoint controllers associated with the NTB function device.
+ * Invoked when either the primary or the secondary interface is bound to an
+ * EPC device. The NTB function is initialized only once both interfaces have
+ * an EPC bound to them.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ if (!epf->sec_epc) {
+ dev_dbg(dev, "SECONDARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_epc_create(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+		dev_err(dev, "Failed to initialize NTB EPC BARs\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
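+
+/*
+ * For reference (illustrative expansion, not additional code to build):
+ * EPF_NTB_R(spad_count) and EPF_NTB_W(spad_count) generate
+ * epf_ntb_spad_count_show() and epf_ntb_spad_count_store(), which simply read
+ * and write ntb->spad_count.  CONFIGFS_ATTR(epf_ntb_, spad_count) below then
+ * packages the pair into 'epf_ntb_attr_spad_count' for use in epf_ntb_attrs[].
+ */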
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int win_no; \
+ \
+ sscanf(#_name, "mw%d", &win_no); \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (ntb->num_mws < win_no) { \
+		dev_err(dev, "Invalid num_mws: %d\n", ntb->num_mws);	\
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
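+
+/*
+ * Note: the mw* accessors recover the window index from the attribute name
+ * itself.  For example, EPF_NTB_MW_W(mw3) embeds sscanf("mw3", "mw%d", ...),
+ * so a write to the 'mw3' configfs file ends up in ntb->mws_size[2].
+ */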
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: the parent configfs group of the EPF device
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB-specific properties such as db_count, spad_count, num_mws, etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
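+
+/*
+ * Example (hypothetical paths, assuming the user created an NTB function
+ * device named 'func1'): the NTB-specific attributes become available in the
+ * sub-directory the user creates under the function device directory, e.g.
+ *
+ *	/sys/kernel/config/pci_ep/functions/pci_epf_ntb/func1/<subdir>/
+ *		spad_count  db_count  num_mws  mw1  mw2  mw3  mw4
+ */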
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when the endpoint function bus detects an NTB
+ * endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+}
+
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_ntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_ntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+					    WQ_HIGHPRI, 0);
+	if (!kpcintb_workqueue) {
+		pr_err("Failed to allocate kpcintb workqueue\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index e4e51d884553..c0ac4e9cbe72 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -619,7 +619,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
if (epf_test->reg[bar]) {
pci_epc_clear_bar(epc, epf->func_no, epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
}
}
}
@@ -651,7 +652,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -771,7 +773,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align);
+ epc_features->align, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -789,7 +791,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align);
+ epc_features->align,
+ PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -834,6 +837,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
linkup_notifier = epc_features->linkup_notifier;
core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ if (test_reg_bar < 0)
+ return -EINVAL;
pci_epf_configure_bar(epf, epc_features);
}
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 3710adf51912..f3a8b833b479 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -21,6 +21,9 @@ static struct config_group *controllers_group;
struct pci_epf_group {
struct config_group group;
+ struct config_group primary_epc_group;
+ struct config_group secondary_epc_group;
+ struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
};
@@ -41,6 +44,127 @@ static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
return container_of(to_config_group(item), struct pci_epc_group, group);
}
+static int pci_secondary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, SECONDARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_secondary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_secondary_epc_item_ops = {
+ .allow_link = pci_secondary_epc_epf_link,
+ .drop_link = pci_secondary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_secondary_epc_type = {
+ .ct_item_ops = &pci_secondary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_secondary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *secondary_epc_group;
+
+ secondary_epc_group = &epf_group->secondary_epc_group;
+ config_group_init_type_name(secondary_epc_group, "secondary",
+ &pci_secondary_epc_type);
+ configfs_register_group(&epf_group->group, secondary_epc_group);
+
+ return secondary_epc_group;
+}
+
+static int pci_primary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_primary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_primary_epc_item_ops = {
+ .allow_link = pci_primary_epc_epf_link,
+ .drop_link = pci_primary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_primary_epc_type = {
+ .ct_item_ops = &pci_primary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_primary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *primary_epc_group = &epf_group->primary_epc_group;
+
+ config_group_init_type_name(primary_epc_group, "primary",
+ &pci_primary_epc_type);
+ configfs_register_group(&epf_group->group, primary_epc_group);
+
+ return primary_epc_group;
+}
+
static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
size_t len)
{
@@ -94,13 +218,13 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- ret = pci_epc_add_epf(epc, epf);
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
if (ret)
return ret;
ret = pci_epf_bind(epf);
if (ret) {
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
return ret;
}
@@ -120,7 +244,7 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
pci_epf_unbind(epf);
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
}
static struct configfs_item_operations pci_epc_item_ops = {
@@ -366,12 +490,53 @@ static struct configfs_item_operations pci_epf_ops = {
.release = pci_epf_release,
};
+static struct config_group *pci_epf_type_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(&group->cg_item);
+ struct config_group *epf_type_group;
+
+ epf_type_group = pci_epf_type_add_cfs(epf_group->epf, group);
+ return epf_type_group;
+}
+
+static void pci_epf_type_drop(struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_type_group_ops = {
+ .make_group = &pci_epf_type_make,
+ .drop_item = &pci_epf_type_drop,
+};
+
static const struct config_item_type pci_epf_type = {
+ .ct_group_ops = &pci_epf_type_group_ops,
.ct_item_ops = &pci_epf_ops,
.ct_attrs = pci_epf_attrs,
.ct_owner = THIS_MODULE,
};
+static void pci_epf_cfs_work(struct work_struct *work)
+{
+ struct pci_epf_group *epf_group;
+ struct config_group *group;
+
+ epf_group = container_of(work, struct pci_epf_group, cfs_work.work);
+ group = pci_ep_cfs_add_primary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'primary' EPC interface\n");
+ return;
+ }
+
+ group = pci_ep_cfs_add_secondary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'secondary' EPC interface\n");
+ return;
+ }
+}
+
static struct config_group *pci_epf_make(struct config_group *group,
const char *name)
{
@@ -410,10 +575,15 @@ static struct config_group *pci_epf_make(struct config_group *group,
goto free_name;
}
+ epf->group = &epf_group->group;
epf_group->epf = epf;
kfree(epf_name);
+ INIT_DELAYED_WORK(&epf_group->cfs_work, pci_epf_cfs_work);
+ queue_delayed_work(system_wq, &epf_group->cfs_work,
+ msecs_to_jiffies(1));
+
return &epf_group->group;
free_name:
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index cadd3db0cbb0..cc8f9eb2b177 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -87,24 +87,50 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* pci_epc_get_first_free_bar() - helper to get first unreserved BAR
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
- * Invoke to get the first unreserved BAR that can be used for endpoint
+ * Invoke to get the first unreserved BAR that can be used by the endpoint
* function. For any incorrect value in reserved_bar return '0'.
*/
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
- *epc_features)
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
- int free_bar;
+ return pci_epc_get_next_free_bar(epc_features, BAR_0);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+
+/**
+ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ * @bar: the starting BAR number from where unreserved BAR should be searched
+ *
+ * Invoke to get the next unreserved BAR starting from @bar that can be used
+ * by an endpoint function. Return NO_BAR if no unreserved BAR is available.
+ */
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar)
+{
+ unsigned long free_bar;
if (!epc_features)
- return 0;
+ return BAR_0;
+
+ /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
+ if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ bar++;
+
+ /* Find if the reserved BAR is also a 64-bit BAR */
+ free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
- free_bar = ffz(epc_features->reserved_bar);
+ /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
+ free_bar <<= 1;
+ free_bar |= epc_features->reserved_bar;
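+
+	/*
+	 * Illustrative example: with @bar = BAR_0, reserved_bar = BIT(BAR_0)
+	 * and bar_fixed_64bit = BIT(BAR_0), the mask built above is
+	 * BIT(BAR_0) | BIT(BAR_1), so the search below skips BAR_1 (the upper
+	 * half of the 64-bit BAR_0) and returns BAR_2.
+	 */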
+
+ free_bar = find_next_zero_bit(&free_bar, 6, bar);
if (free_bar > 5)
- return 0;
+ return NO_BAR;
return free_bar;
}
-EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
/**
* pci_epc_get_features() - get the features supported by EPC
@@ -205,6 +231,47 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
/**
+ * pci_epc_map_msi_irq() - Map physical address to MSI address and return
+ * MSI data
+ * @epc: the EPC device which has the MSI capability
+ * @func_no: the physical endpoint function number in the EPC device
+ * @phys_addr: the physical address of the outbound region
+ * @interrupt_num: the MSI interrupt number
+ * @entry_size: Size of the outbound address region for each interrupt
+ * @msi_data: the data that should be written in order to raise MSI interrupt
+ * with interrupt number 'interrupt_num'
+ * @msi_addr_offset: Offset of MSI address from the aligned outbound address
+ * to which the MSI address is mapped
+ *
+ * Invoke to map physical address to MSI address and return MSI data. The
+ * physical address should be an address in the outbound region. This is
+ * required to implement doorbell functionality of NTB wherein EPC on either
+ * side of the interface (primary and secondary) can directly write to the
+ * physical address (in the outbound region) of the other interface to ring
+ * the doorbell.
+ */
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
+ u8 interrupt_num, u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (!epc->ops->map_msi_irq)
+ return -EINVAL;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
+ entry_size, msi_data, msi_addr_offset);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
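+
+/*
+ * Hypothetical usage sketch (names invented here, not taken from a driver):
+ * an NTB-style function maps an outbound window at 'db_phys_addr' to the MSI
+ * address of the function and learns what must be written to ring a doorbell:
+ *
+ *	u32 db_data, db_offset;
+ *
+ *	ret = pci_epc_map_msi_irq(epc, func_no, db_phys_addr, interrupt_num,
+ *				  entry_size, &db_data, &db_offset);
+ *
+ * On success, the peer can ring the doorbell by writing 'db_data' at offset
+ * 'db_offset' within the region mapped at 'db_phys_addr'.
+ */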
+
+/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts was requested
* @func_no: the endpoint function number in the EPC device
@@ -467,21 +534,28 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
* pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
* @epc: the EPC device to which the endpoint function should be added
* @epf: the endpoint function to be added
+ * @type: Identifies if the EPC is connected to the primary or secondary
+ * interface of the EPF
*
* A PCI endpoint device can have one or more functions. In the case of PCIe,
* the specification allows up to 8 PCIe endpoint functions. Invoke
* pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
*/
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
u32 func_no;
int ret = 0;
- if (epf->epc)
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (type == PRIMARY_INTERFACE && epf->epc)
return -EBUSY;
- if (IS_ERR(epc))
- return -EINVAL;
+ if (type == SECONDARY_INTERFACE && epf->sec_epc)
+ return -EBUSY;
mutex_lock(&epc->lock);
func_no = find_first_zero_bit(&epc->function_num_map,
@@ -498,11 +572,17 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
}
set_bit(func_no, &epc->function_num_map);
- epf->func_no = func_no;
- epf->epc = epc;
-
- list_add_tail(&epf->list, &epc->pci_epf);
+ if (type == PRIMARY_INTERFACE) {
+ epf->func_no = func_no;
+ epf->epc = epc;
+ list = &epf->list;
+ } else {
+ epf->sec_epc_func_no = func_no;
+ epf->sec_epc = epc;
+ list = &epf->sec_epc_list;
+ }
+ list_add_tail(list, &epc->pci_epf);
ret:
mutex_unlock(&epc->lock);
@@ -517,14 +597,26 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*
* Invoke to remove PCI endpoint function from the endpoint controller.
*/
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
+ u32 func_no = 0;
+
if (!epc || IS_ERR(epc) || !epf)
return;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ list = &epf->list;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ list = &epf->sec_epc_list;
+ }
+
mutex_lock(&epc->lock);
- clear_bit(epf->func_no, &epc->function_num_map);
- list_del(&epf->list);
+ clear_bit(func_no, &epc->function_num_map);
+ list_del(list);
epf->epc = NULL;
mutex_unlock(&epc->lock);
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index c977cf9dce56..7646c8660d42 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,6 +21,38 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
+ * pci_epf_type_add_cfs() - Help function drivers expose function-specific
+ * attributes in configfs
+ * @epf: the EPF device that has to be configured using configfs
+ * @group: the parent configfs group (corresponding to entries in
+ * pci_epf_device_id)
+ *
+ * Invoke to expose function-specific attributes in configfs. If the function
+ * driver does not have any user-configurable attributes to expose,
+ * return NULL.
+ */
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct config_group *epf_type_group;
+
+ if (!epf->driver) {
+ dev_err(&epf->dev, "epf device not bound to driver\n");
+ return NULL;
+ }
+
+ if (!epf->driver->ops->add_cfs)
+ return NULL;
+
+ mutex_lock(&epf->lock);
+ epf_type_group = epf->driver->ops->add_cfs(epf, group);
+ mutex_unlock(&epf->lock);
+
+ return epf_type_group;
+}
+EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
+
+/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -74,24 +106,37 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
* @bar: the BAR number corresponding to the register space
+ * @type: Identifies if the allocated space is for primary EPC or secondary EPC
*
* Invoke to free the allocated PCI EPF register space.
*/
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type)
{
struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc;
if (!addr)
return;
- dma_free_coherent(dev, epf->bar[bar].size, addr,
- epf->bar[bar].phys_addr);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
- epf->bar[bar].phys_addr = 0;
- epf->bar[bar].addr = NULL;
- epf->bar[bar].size = 0;
- epf->bar[bar].barno = 0;
- epf->bar[bar].flags = 0;
+ dev = epc->dev.parent;
+ dma_free_coherent(dev, epf_bar[bar].size, addr,
+ epf_bar[bar].phys_addr);
+
+ epf_bar[bar].phys_addr = 0;
+ epf_bar[bar].addr = NULL;
+ epf_bar[bar].size = 0;
+ epf_bar[bar].barno = 0;
+ epf_bar[bar].flags = 0;
}
EXPORT_SYMBOL_GPL(pci_epf_free_space);
@@ -101,15 +146,18 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
* @align: alignment size for the allocation region
+ * @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align)
+ size_t align, enum pci_epc_interface_type type)
{
- void *space;
- struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
+ struct pci_epc *epc;
+ struct device *dev;
+ void *space;
if (size < 128)
size = 128;
@@ -119,17 +167,26 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
else
size = roundup_pow_of_two(size);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
}
- epf->bar[bar].phys_addr = phys_addr;
- epf->bar[bar].addr = space;
- epf->bar[bar].size = size;
- epf->bar[bar].barno = bar;
- epf->bar[bar].flags |= upper_32_bits(size) ?
+ epf_bar[bar].phys_addr = phys_addr;
+ epf_bar[bar].addr = space;
+ epf_bar[bar].size = size;
+ epf_bar[bar].barno = bar;
+ epf_bar[bar].flags |= upper_32_bits(size) ?
PCI_BASE_ADDRESS_MEM_TYPE_64 :
PCI_BASE_ADDRESS_MEM_TYPE_32;
@@ -282,22 +339,6 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf)
-{
- if (!id || !epf)
- return NULL;
-
- while (*id->name) {
- if (strcmp(epf->name, id->name) == 0)
- return id;
- id++;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_epf_match_device);
-
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);