author     Bjorn Helgaas <bhelgaas@google.com>  2020-04-02 21:26:57 +0200
committer  Bjorn Helgaas <bhelgaas@google.com>  2020-04-02 21:26:57 +0200
commit     b16f2ab280f9c172ea02286fa298d5c8a3d68c88 (patch)
tree       84da21898c8c2cd56f63d1d2793fbf2ba80552c4 /drivers/pci
parent     Merge branch 'remotes/lorenzo/pci/dwc' (diff)
parent     misc: pci_endpoint_test: remove duplicate macro PCI_ENDPOINT_TEST_STATUS (diff)
Merge branch 'remotes/lorenzo/pci/endpoint'
  - Use notification chain instead of EPF linkup ops for EPC events (Kishon
    Vijay Abraham I); a sketch of the new notifier flow follows this list

  - Protect concurrent allocation in endpoint outbound address region
    (Kishon Vijay Abraham I)

  - Protect concurrent access to pci_epf_ops (Kishon Vijay Abraham I)

  - Assign function number for each PF in endpoint core (Kishon Vijay
    Abraham I)

  - Refactor endpoint mode core initialization (Vidya Sagar)

  - Add API to notify when core initialization completes (Vidya Sagar)

  - Add test framework support to defer core initialization (Vidya Sagar)

  - Update Tegra SoC ABI header to support uninitialization of UPHY PLL
    when in endpoint mode without reference clock (Vidya Sagar)

  - Add DT and driver support for Tegra194 PCIe endpoint nodes (Vidya
    Sagar)

  - Add endpoint test support for DMA data transfer (Kishon Vijay
    Abraham I)

  - Print throughput information in endpoint test (Kishon Vijay Abraham I)

  - Use streaming DMA APIs for endpoint test buffer allocation (Kishon
    Vijay Abraham I)

  - Add endpoint test command line option for DMA (Kishon Vijay Abraham I)

  - When stopping a controller via configfs, clear endpoint "start" entry
    to prevent WARN_ON (Kunihiko Hayashi)

  - Update endpoint ->set_msix() to pay attention to MSI-X BAR Indicator
    and offset when finding MSI-X tables (Kishon Vijay Abraham I)

  - MSI-X tables are in local memory, not in the PCI address space; update
    pcie-designware-ep to account for this (Kishon Vijay Abraham I)

  - Allow AM654 PCIe Endpoint to raise MSI-X interrupts (Kishon Vijay
    Abraham I)

  - Avoid using module parameter to determine irqtype for endpoint test
    (Kishon Vijay Abraham I)

  - Add ioctl to clear IRQ for endpoint test (Kishon Vijay Abraham I)

  - Add endpoint test 'e' option to clear IRQ (Kishon Vijay Abraham I)

  - Bump limit on number of endpoint test devices from 10 to 10,000
    (Kishon Vijay Abraham I)

  - Use full pci-endpoint-test name in request_irq() for easier profiling
    (Kishon Vijay Abraham I)

  - Reduce log level of -EPROBE_DEFER error messages to debug (Thierry
    Reding)

* remotes/lorenzo/pci/endpoint:
  misc: pci_endpoint_test: remove duplicate macro PCI_ENDPOINT_TEST_STATUS
  PCI: tegra: Print -EPROBE_DEFER error message at debug level
  misc: pci_endpoint_test: Use full pci-endpoint-test name in request_irq()
  misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
  tools: PCI: Add 'e' to clear IRQ
  misc: pci_endpoint_test: Add ioctl to clear IRQ
  misc: pci_endpoint_test: Avoid using module parameter to determine irqtype
  PCI: keystone: Allow AM654 PCIe Endpoint to raise MSI-X interrupt
  PCI: dwc: Fix dw_pcie_ep_raise_msix_irq() to get correct MSI-X table address
  PCI: endpoint: Fix ->set_msix() to take BIR and offset as arguments
  misc: pci_endpoint_test: Add support to get DMA option from userspace
  tools: PCI: Add 'd' command line option to support DMA
  misc: pci_endpoint_test: Use streaming DMA APIs for buffer allocation
  PCI: endpoint: functions/pci-epf-test: Print throughput information
  PCI: endpoint: functions/pci-epf-test: Add DMA support to transfer data
  PCI: endpoint: Fix clearing start entry in configfs
  PCI: tegra: Add support for PCIe endpoint mode in Tegra194
  dt-bindings: PCI: tegra: Add DT support for PCIe EP nodes in Tegra194
  soc/tegra: bpmp: Update ABI header
  PCI: pci-epf-test: Add support to defer core initialization
  PCI: dwc: Add API to notify core initialization completion
  PCI: endpoint: Add notification for core init completion
  PCI: dwc: Refactor core initialization code for EP mode
  PCI: endpoint: Add core init notifying feature
  PCI: endpoint: Assign function number for each PF in EPC core
  PCI: endpoint: Protect concurrent access to pci_epf_ops with mutex
  PCI: endpoint: Fix for concurrent memory allocation in OB address region
  PCI: endpoint: Replace spinlock with mutex
  PCI: endpoint: Use notification chain mechanism to notify EPC events to EPF
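A minimal sketch of the new notification-chain flow (the complete version is
in the pci-epf-test hunks below); my_epf_core_init() and my_epf_start_work()
are hypothetical placeholders, not part of this merge:

	static int my_epf_notifier(struct notifier_block *nb, unsigned long val,
				   void *data)
	{
		struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

		switch (val) {
		case CORE_INIT:	/* controller registers can now be programmed */
			if (my_epf_core_init(epf))	/* hypothetical helper */
				return NOTIFY_BAD;
			return NOTIFY_OK;
		case LINK_UP:	/* host has trained the link */
			my_epf_start_work(epf);		/* hypothetical helper */
			return NOTIFY_OK;
		default:
			return NOTIFY_BAD;
		}
	}

	/* in the EPF driver's ->bind() callback: */
	epf->nb.notifier_call = my_epf_notifier;
	pci_epc_register_notifier(epf->epc, &epf->nb);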
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig               |  29
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c        |   5
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c  | 144
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h     |  12
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c       | 712
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c    | 402
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c                |  28
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c              | 137
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c               |  10
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c              |  35
10 files changed, 1280 insertions(+), 234 deletions(-)
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0830dfcfa43a..03dcaf65d159 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -248,14 +248,37 @@ config PCI_MESON
implement the driver.
config PCIE_TEGRA194
- tristate "NVIDIA Tegra194 (and later) PCIe controller"
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
help
- Say Y here if you want support for DesignWare core based PCIe host
- controller found in NVIDIA Tegra194 SoC.
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe controllers"
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c702c494a2..790679fdfa48 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
@@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
+ .msix_capable = true,
.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
.bar_fixed_64bit = 1 << BAR_0,
.bar_fixed_size[2] = SZ_1M,
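With msix_capable now set, an endpoint function bound to the AM654
controller can trigger MSI-X through the generic EPC API; a hedged usage
sketch (msix_vec is a 1-based vector number chosen by the caller):

	ret = pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, msix_vec);
	if (ret)
		dev_err(&epf->dev, "failed to raise MSI-X IRQ\n");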
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index cfeccd7e9fff..1cdcbd102ce8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
int flags)
@@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
@@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_writel_dbi(pci, reg + 4, 0);
}
+ ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!ep->msix_cap)
return -EINVAL;
+ dw_pcie_dbi_ro_wr_en(pci);
+
reg = ep->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
- dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep->msix_cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ reg = ep->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
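In the PCI_MSIX_TABLE and PCI_MSIX_PBA registers written above, the low 3
bits carry the BAR Indicator (BIR) and the remaining bits the QWORD-aligned
offset within that BAR, which is why offset and bir can simply be OR'd
together. The caller side of the new ->set_msix() signature appears in the
pci-epf-test hunk later in this diff:

	ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
			       epf_test->test_reg_bar,
			       epf_test->msix_table_offset);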
@@ -409,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- u16 tbl_offset, bir;
- u32 bar_addr_upper, bar_addr_lower;
- u32 msg_addr_upper, msg_addr_lower;
+ struct pci_epf_bar *epf_bar;
u32 reg, msg_data, vec_ctrl;
- u64 tbl_addr, msg_addr, reg_u64;
- void __iomem *msix_tbl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
int ret;
+ u8 bir;
reg = ep->msix_cap + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- reg = PCI_BASE_ADDRESS_0 + (4 * bir);
- bar_addr_upper = 0;
- bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
- reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
- if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-
- tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
- tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
- tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-
- msix_tbl = ioremap(ep->phys_base + tbl_addr,
- PCI_MSIX_ENTRY_SIZE);
- if (!msix_tbl)
- return -EINVAL;
-
- msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
- msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
- vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ epf_bar = ep->epf_bar[bir];
+ msix_tbl = epf_bar->addr;
+ msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);
- iounmap(msix_tbl);
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
}
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ aligned_offset = msg_addr & (epc->mem->page_size - 1);
+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem);
+ writel(msg_data, ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
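The msix_tbl indexing above depends on struct pci_epf_msix_tbl, which this
series adds in include/linux/pci-epf.h (outside the drivers/pci diffstat,
reproduced here for reference):

	struct pci_epf_msix_tbl {
		u64 msg_addr;
		u32 msg_data;
		u32 vector_ctrl;
	};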
@@ -492,19 +500,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_setup(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
int ret;
- u32 reg;
void *addr;
- u8 hdr_type;
- unsigned int nbars;
- unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
if (!pci->dbi_base || !pci->dbi_base2) {
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
@@ -563,13 +606,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->ep_init)
ep->ops->ep_init(ep);
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
- if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
- dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
- hdr_type);
- return -EIO;
- }
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -587,23 +623,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
-
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
-
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
- dw_pcie_dbi_ro_wr_en(pci);
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
}
- dw_pcie_setup(pci);
-
- return 0;
+ return dw_pcie_ep_init_complete(ep);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
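For glue drivers whose pci_epc_features set core_init_notifier, the intended
split-initialization sequence is roughly the following (this mirrors how the
Tegra194 code later in this diff uses the new API):

	/* probe time: register the EPC, defer all DBI register programming */
	ret = dw_pcie_ep_init(ep);

	/* later, once the core is out of reset (e.g. PERST# deassert): */
	ret = dw_pcie_ep_init_complete(ep);	/* program capabilities, ReBAR */
	if (!ret)
		dw_pcie_ep_init_notify(ep);	/* deliver CORE_INIT to the EPF */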
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index a22ea5982817..d6e1f397e6b0 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -233,6 +233,7 @@ struct dw_pcie_ep {
phys_addr_t msi_mem_phys;
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
struct dw_pcie_ops {
@@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cbe95f0ea0ca..ae30a2fd3716 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -53,6 +54,7 @@
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
@@ -60,19 +62,26 @@
#define APPL_INTR_STATUS_L0 0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
#define APPL_INTR_EN_L1_0_0 0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
#define APPL_INTR_STATUS_L1_0_0 0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
#define APPL_INTR_STATUS_L1_1 0x2C
#define APPL_INTR_STATUS_L1_2 0x30
#define APPL_INTR_STATUS_L1_3 0x34
#define APPL_INTR_STATUS_L1_6 0x3C
#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
@@ -103,8 +112,12 @@
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+#define APPL_MSI_CTRL_1 0xAC
+
#define APPL_MSI_CTRL_2 0xB0
+#define APPL_LEGACY_INTX 0xB8
+
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT 16
@@ -205,6 +218,13 @@
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
#define PORT_LOGIC_MSIX_DOORBELL 0x948
#define CAP_SPCIE_CAP_OFF 0x154
@@ -223,6 +243,13 @@
#define GEN3_CORE_CLK_FREQ 250000000
#define GEN4_CORE_CLK_FREQ 500000000
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
static const unsigned int pcie_gen_freq[] = {
GEN1_CORE_CLK_FREQ,
GEN2_CORE_CLK_FREQ,
@@ -260,6 +287,8 @@ struct tegra_pcie_dw {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
+ enum dw_pcie_device_mode mode;
+
bool supports_clkreq;
bool enable_cdm_check;
bool link_state;
@@ -283,6 +312,16 @@ struct tegra_pcie_dw {
struct phy **phys;
struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+};
+
+struct tegra_pcie_dw_of_data {
+ enum dw_pcie_device_mode mode;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
}
}
-static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
return IRQ_HANDLED;
}
-static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed;
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
- return tegra_pcie_rp_irq_handler(pcie);
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ int spurious = 1;
+ u32 val, tmp;
+
+ val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ dw_pcie_ep_linkup(ep);
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+
+ if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+ val);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
}
static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
pp->num_vectors = MAX_MSI_IRQS;
}
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ enable_irq(pcie->pex_rst_irq);
+
+ return 0;
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
@@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
+ if (pcie->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
return 0;
}
@@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
@@ -1427,8 +1657,396 @@ fail_pm_get_sync:
return ret;
}
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+ dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
+ goto fail_pll_init;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ /* configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ /* Configure N_FTS & FTS */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+ val &= ~(N_FTS_MASK << N_FTS_SHIFT);
+ val |= N_FTS_VAL << N_FTS_SHIFT;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+ val &= ~FTS_MASK;
+ val |= FTS_VAL;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+
+ /* Configure Max Speed from DT */
+ if (pcie->max_speed && pcie->max_speed != -EINVAL) {
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_SLS;
+ val |= pcie->max_speed;
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
+ val);
+ }
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+ dev_dbg(dev, "Initialization of endpoint is completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PCIe EP work thread string\n");
+ return -ENOMEM;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
@@ -1440,6 +2058,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
int ret;
u32 i;
+ data = of_device_get_match_data(dev);
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1449,19 +2069,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci->ops = &tegra_dw_pcie_ops;
pp = &pci->pp;
pcie->dev = &pdev->dev;
+ pcie->mode = (enum dw_pcie_device_mode)data->mode;
ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
return ret;
}
ret = tegra_pcie_get_slot_regulators(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
return ret;
}
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
if (IS_ERR(pcie->pex_ctl_supply)) {
ret = PTR_ERR(pcie->pex_ctl_supply);
@@ -1557,24 +2195,49 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
- IRQF_SHARED, "tegra-pcie-intr", pcie);
- if (ret) {
- dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
- return ret;
- }
-
pcie->bpmp = tegra_bpmp_get(dev);
if (IS_ERR(pcie->bpmp))
return PTR_ERR(pcie->bpmp);
platform_set_drvdata(pdev, pcie);
- ret = tegra_pcie_config_rp(pcie);
- if (ret && ret != -ENOMEDIUM)
- goto fail;
- else
- return 0;
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ }
fail:
tegra_bpmp_put(pcie->bpmp);
@@ -1593,6 +2256,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
return 0;
}
@@ -1697,9 +2362,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
__deinit_controller(pcie);
}
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
+ .data = &tegra_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra_pcie_dw_ep_of_data,
},
{},
};
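For context, a rough sketch (as C comments; the generic DWC endpoint glue
invoked here is unchanged by this diff) of how the new start_link/stop_link
ops are reached from userspace:

	/*
	 * echo 1 > .../controllers/<epc>/start   (configfs)
	 *   pci_epc_start(epc)
	 *     -> epc->ops->start(epc)            dw_pcie_ep_start()
	 *       -> pci->ops->start_link(pci)     tegra_pcie_dw_start_link()
	 *
	 * tegra_pcie_dw_start_link() only enable_irq()s the PERST# IRQ; the
	 * real initialization runs in pex_ep_event_pex_rst_deassert() when
	 * the host deasserts PERST#.
	 */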
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5d74f81ddfe4..60330f3e3751 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -8,6 +8,7 @@
#include <linux/crc32.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define FLAG_USE_DMA BIT(0)
+
#define TIMER_RESOLUTION 1
static struct workqueue_struct *kpcitest_workqueue;
@@ -47,7 +50,11 @@ struct pci_epf_test {
void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
+ size_t msix_table_offset;
struct delayed_work cmd_handler;
+ struct dma_chan *dma_chan;
+ struct completion transfer_complete;
+ bool dma_supported;
const struct pci_epc_features *epc_features;
};
@@ -61,6 +68,7 @@ struct pci_epf_test_reg {
u32 checksum;
u32 irq_type;
u32 irq_number;
+ u32 flags;
} __packed;
static struct pci_epf_header test_header = {
@@ -72,13 +80,156 @@ static struct pci_epf_header test_header = {
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+static void pci_epf_test_dma_callback(void *param)
+{
+ struct pci_epf_test *epf_test = param;
+
+ complete(&epf_test->transfer_complete);
+}
+
+/**
+ * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
+ * data between PCIe EP and remote PCIe RC
+ * @epf_test: the EPF test device that performs the data transfer operation
+ * @dma_dst: The destination address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @dma_src: The source address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @len: The size of the data transfer
+ *
+ * Function that uses dmaengine API to transfer data between PCIe EP and remote
+ * PCIe RC. The source and destination address can be a physical address given
+ * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
+ *
+ * The function returns '0' on success and negative value on failure.
+ */
+static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len)
+{
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_chan *chan = epf_test->dma_chan;
+ struct pci_epf *epf = epf_test->epf;
+ struct dma_async_tx_descriptor *tx;
+ struct device *dev = &epf->dev;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (IS_ERR_OR_NULL(chan)) {
+ dev_err(dev, "Invalid DMA memcpy channel\n");
+ return -EINVAL;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ tx->callback = pci_epf_test_dma_callback;
+ tx->callback_param = epf_test;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&epf_test->transfer_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
+ if (ret < 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Function to initialize EPF test DMA channel.
+ */
+static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+{
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DMA channel\n");
+ return ret;
+ }
+ init_completion(&epf_test->transfer_complete);
+
+ epf_test->dma_chan = dma_chan;
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
+ * @epf: the EPF test device that performs data transfer operation
+ *
+ * Helper to cleanup EPF test DMA channel.
+ */
+static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+{
+ dma_release_channel(epf_test->dma_chan);
+ epf_test->dma_chan = NULL;
+}
+
+static void pci_epf_test_print_rate(const char *ops, u64 size,
+ struct timespec64 *start,
+ struct timespec64 *end, bool dma)
+{
+ struct timespec64 ts;
+ u64 rate, ns;
+
+ ts = timespec64_sub(*end, *start);
+
+ /* convert both size (stored in 'rate') and time in terms of 'ns' */
+ ns = timespec64_to_ns(&ts);
+ rate = size * NSEC_PER_SEC;
+
+ /* Divide both size (stored in 'rate') and ns by a common factor */
+ while (ns > UINT_MAX) {
+ rate >>= 1;
+ ns >>= 1;
+ }
+
+ if (!ns)
+ return;
+
+ /* calculate the rate */
+ do_div(rate, (uint32_t)ns);
+
+ pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
+ "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
+ (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
+}
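For intuition, a worked example of the computation above with illustrative
figures: a 1 MiB transfer completing in 2 ms gives ns = 2,000,000 (below
UINT_MAX, so no down-shifting) and rate = 1,048,576 * NSEC_PER_SEC /
2,000,000 = 524,288,000 bytes/s, printed as 512000 KB/s.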
+
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
int ret;
+ bool use_dma;
void __iomem *src_addr;
void __iomem *dst_addr;
phys_addr_t src_phys_addr;
phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
@@ -117,8 +268,26 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_dst_addr;
}
- memcpy(dst_addr, src_addr, reg->size);
+ ktime_get_ts64(&start);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ } else {
+ memcpy(dst_addr, src_addr, reg->size);
+ }
+ ktime_get_ts64(&end);
+ pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
+err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
err_dst_addr:
@@ -140,10 +309,14 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
void __iomem *src_addr;
void *buf;
u32 crc32;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -169,12 +342,44 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err_map_addr;
}
- memcpy_fromio(buf, src_addr, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_dma_map;
+ }
+
+ dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev, "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
+ DMA_FROM_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, src_addr, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);
crc32 = crc32_le(~0, buf, reg->size);
if (crc32 != reg->checksum)
ret = -EIO;
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -192,10 +397,14 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
int ret;
void __iomem *dst_addr;
void *buf;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t src_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -224,7 +433,38 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
get_random_bytes(buf, reg->size);
reg->checksum = crc32_le(~0, buf, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev, "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, reg->size,
+ DMA_TO_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(dst_addr, buf, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);
/*
* wait 1ms inorder for the write to complete. Without this delay L3
@@ -232,6 +472,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
*/
usleep_range(1000, 2000);
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -360,14 +601,6 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_linkup(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
-
- queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
- msecs_to_jiffies(1));
-}
-
static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -376,6 +609,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
@@ -424,11 +658,90 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epf_header *header = epf->header;
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ bool msix_capable = false;
+ bool msi_capable = true;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no);
+ if (epc_features) {
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ epf_test->test_reg_bar,
+ epf_test->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int ret;
+
+ switch (val) {
+ case CORE_INIT:
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
+ return NOTIFY_BAD;
+ break;
+
+ case LINK_UP:
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+ break;
+
+ default:
+ dev_err(&epf->dev, "Invalid EPF test notifier event\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+}
+
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
struct pci_epf_bar *epf_bar;
+ size_t msix_table_size = 0;
+ size_t test_reg_bar_size;
+ size_t pba_size = 0;
+ bool msix_capable;
void *base;
int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
@@ -437,13 +750,25 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- if (epc_features->bar_fixed_size[test_reg_bar])
+ test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ epf_test->msix_table_offset = test_reg_bar_size;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+ }
+ test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
+
+ if (epc_features->bar_fixed_size[test_reg_bar]) {
+ if (test_reg_size > bar_size[test_reg_bar])
+ return -ENOMEM;
test_reg_size = bar_size[test_reg_bar];
- else
- test_reg_size = sizeof(struct pci_epf_test_reg);
+ }
- base = pci_epf_alloc_space(epf, test_reg_size,
- test_reg_bar, epc_features->align);
+ base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
+ epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -492,14 +817,11 @@ static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epf_header *header = epf->header;
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- struct device *dev = &epf->dev;
bool linkup_notifier = false;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -507,8 +829,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epc_features = pci_epc_get_features(epc, epf->func_no);
if (epc_features) {
linkup_notifier = epc_features->linkup_notifier;
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
+ core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
pci_epf_configure_bar(epf, epc_features);
}
@@ -516,38 +837,28 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
- }
-
ret = pci_epf_test_alloc_space(epf);
if (ret)
return ret;
- ret = pci_epf_test_set_bar(epf);
- if (ret)
- return ret;
-
- if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret) {
- dev_err(dev, "MSI configuration failed\n");
+ if (!core_init_notifier) {
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
return ret;
- }
}
- if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
- if (ret) {
- dev_err(dev, "MSI-X configuration failed\n");
- return ret;
- }
- }
+ epf_test->dma_supported = true;
- if (!linkup_notifier)
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
+
+ if (linkup_notifier) {
+ epf->nb.notifier_call = pci_epf_test_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+ } else {
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+ }
return 0;
}
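pci_epc_register_notifier() above hooks epf->nb into an atomic notifier
chain on the EPC; the producer side lives in pci-epc-core.c hunks that fall
outside this excerpt. A simplified sketch based on the result of this
series:

	void pci_epc_linkup(struct pci_epc *epc)
	{
		if (!epc || IS_ERR(epc))
			return;

		atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
	}

	void pci_epc_init_notify(struct pci_epc *epc)
	{
		if (!epc || IS_ERR(epc))
			return;

		atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
	}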
@@ -580,7 +891,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
static struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
- .linkup = pci_epf_test_linkup,
};
static struct pci_epf_driver test_driver = {
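
The sizing above packs three regions into the test BAR: the register
block, the MSI-X table, and the pending-bit array (PBA). A standalone
sketch of that arithmetic, assuming PCI_MSIX_ENTRY_SIZE is 16 bytes and
32 MSI-X vectors are requested; the 128-byte register size stands in
for ALIGN(sizeof(struct pci_epf_test_reg), 128) and is illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN(x, a)         (((x) + (a) - 1) & ~((size_t)(a) - 1))
	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
	#define PCI_MSIX_ENTRY_SIZE 16

	int main(void)
	{
		size_t reg_size = 128;         /* aligned register block, assumed */
		uint16_t msix_interrupts = 32; /* illustrative vector count */

		size_t msix_table_offset = reg_size; /* table follows the registers */
		size_t msix_table_size = PCI_MSIX_ENTRY_SIZE * msix_interrupts; /* 512 */
		size_t pba_size = ALIGN(DIV_ROUND_UP(msix_interrupts, 8), 8);   /* 8 */

		printf("MSI-X table at %zu, PBA at %zu, BAR size %zu\n",
		       msix_table_offset,
		       msix_table_offset + msix_table_size,
		       reg_size + msix_table_size + pba_size); /* 128, 640, 648 */
		return 0;
	}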
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d1288a0bd530..55edce50be96 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -29,7 +29,6 @@ struct pci_epc_group {
struct config_group group;
struct pci_epc *epc;
bool start;
- unsigned long function_num_map;
};
static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
@@ -58,6 +57,7 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
if (!start) {
pci_epc_stop(epc);
+ epc_group->start = 0;
return len;
}
@@ -89,37 +89,22 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct config_item *epf_item)
{
int ret;
- u32 func_no = 0;
struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- func_no = find_first_zero_bit(&epc_group->function_num_map,
- BITS_PER_LONG);
- if (func_no >= BITS_PER_LONG)
- return -EINVAL;
-
- set_bit(func_no, &epc_group->function_num_map);
- epf->func_no = func_no;
-
ret = pci_epc_add_epf(epc, epf);
if (ret)
- goto err_add_epf;
+ return ret;
ret = pci_epf_bind(epf);
- if (ret)
- goto err_epf_bind;
+ if (ret) {
+ pci_epc_remove_epf(epc, epf);
+ return ret;
+ }
return 0;
-
-err_epf_bind:
- pci_epc_remove_epf(epc, epf);
-
-err_add_epf:
- clear_bit(func_no, &epc_group->function_num_map);
-
- return ret;
}
static void pci_epc_epf_unlink(struct config_item *epc_item,
@@ -134,7 +119,6 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
- clear_bit(epf->func_no, &epc_group->function_num_map);
pci_epf_unbind(epf);
pci_epc_remove_epf(epc, epf);
}
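
The per-controller function_num_map now lives in pci_epc_add_epf()
(below) instead of in this configfs link callback, so function numbers
are handed out under epc->lock no matter how an EPF is attached. A
plain-C sketch of the first-free-bit idiom involved; the kernel's
find_first_zero_bit()/set_bit()/clear_bit() operate on bitmap words and
take a pointer plus length, these stand-ins are simplified:

	#include <limits.h>
	#include <stdio.h>

	#define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)

	static unsigned int first_zero_bit(unsigned long map)
	{
		for (unsigned int bit = 0; bit < BITS_PER_ULONG; bit++)
			if (!(map & (1UL << bit)))
				return bit;
		return BITS_PER_ULONG; /* map full */
	}

	int main(void)
	{
		unsigned long function_num_map = 0;

		/* Adding three EPFs assigns func_no 0, 1, 2 in order. */
		for (int i = 0; i < 3; i++) {
			unsigned int func_no = first_zero_bit(function_num_map);
			function_num_map |= 1UL << func_no;  /* set_bit() */
			printf("assigned func_no %u\n", func_no);
		}

		/* Removing function 1 frees its slot for the next EPF. */
		function_num_map &= ~(1UL << 1);             /* clear_bit() */
		printf("next free func_no: %u\n", first_zero_bit(function_num_map));
		return 0;
	}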
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 2091508c1620..82ba0dc7f2f5 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -120,7 +120,6 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no)
{
const struct pci_epc_features *epc_features;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
@@ -128,9 +127,9 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
if (!epc->ops->get_features)
return NULL;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc_features = epc->ops->get_features(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return epc_features;
}
@@ -144,14 +143,12 @@ EXPORT_SYMBOL_GPL(pci_epc_get_features);
*/
void pci_epc_stop(struct pci_epc *epc)
{
- unsigned long flags;
-
if (IS_ERR(epc) || !epc->ops->stop)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->stop(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);
@@ -164,7 +161,6 @@ EXPORT_SYMBOL_GPL(pci_epc_stop);
int pci_epc_start(struct pci_epc *epc)
{
int ret;
- unsigned long flags;
if (IS_ERR(epc))
return -EINVAL;
@@ -172,9 +168,9 @@ int pci_epc_start(struct pci_epc *epc)
if (!epc->ops->start)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->start(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -193,7 +189,6 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -201,9 +196,9 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (!epc->ops->raise_irq)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -219,7 +214,6 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -227,9 +221,9 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msi)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msi(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -252,7 +246,6 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
int ret;
u8 encode_int;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts > 32)
@@ -263,9 +256,9 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
encode_int = order_base_2(interrupts);
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_msi(epc, func_no, encode_int);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -281,7 +274,6 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -289,9 +281,9 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msix(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -305,13 +297,15 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the endpoint function number in the EPC device
* @interrupts: number of MSI-X interrupts required by the EPF
+ * @bir: BAR where the MSI-X table resides
+ * @offset: offset within the BAR at which the MSI-X table starts
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts < 1 || interrupts > 2048)
@@ -320,9 +314,9 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!epc->ops->set_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ mutex_unlock(&epc->lock);
return ret;
}
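
With the two extra arguments, the controller driver learns where the
function driver placed its MSI-X table instead of assuming a fixed
location. A sketch of the call as the pci-epf-test core-init path would
make it, reusing the BAR and offset computed in
pci_epf_test_alloc_space() above (error handling condensed):

	ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
			       epf_test->test_reg_bar,
			       epf_test->msix_table_offset);
	if (ret) {
		dev_err(dev, "MSI-X configuration failed\n");
		return ret;
	}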
@@ -339,17 +333,15 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
if (!epc->ops->unmap_addr)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->unmap_addr(epc, func_no, phys_addr);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -367,7 +359,6 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -375,9 +366,9 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (!epc->ops->map_addr)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -394,8 +385,6 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
(epf_bar->barno == BAR_5 &&
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
@@ -404,9 +393,9 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->clear_bar)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->clear_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -422,7 +411,6 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
int ret;
- unsigned long irq_flags;
int flags = epf_bar->flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -437,9 +425,9 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->set_bar)
return 0;
- spin_lock_irqsave(&epc->lock, irq_flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, irq_flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -460,7 +448,6 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *header)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -468,9 +455,9 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (!epc->ops->write_header)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->write_header(epc, func_no, header);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -487,7 +474,8 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
*/
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
+ u32 func_no;
+ int ret = 0;
if (epf->epc)
return -EBUSY;
@@ -495,16 +483,30 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
if (IS_ERR(epc))
return -EINVAL;
- if (epf->func_no > epc->max_functions - 1)
- return -EINVAL;
+ mutex_lock(&epc->lock);
+ func_no = find_first_zero_bit(&epc->function_num_map,
+ BITS_PER_LONG);
+ if (func_no >= BITS_PER_LONG) {
+ ret = -EINVAL;
+ goto ret;
+ }
+ if (func_no > epc->max_functions - 1) {
+ dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ set_bit(func_no, &epc->function_num_map);
+ epf->func_no = func_no;
epf->epc = epc;
- spin_lock_irqsave(&epc->lock, flags);
list_add_tail(&epf->list, &epc->pci_epf);
- spin_unlock_irqrestore(&epc->lock, flags);
- return 0;
+ret:
+ mutex_unlock(&epc->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
@@ -517,15 +519,14 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*/
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
-
if (!epc || IS_ERR(epc) || !epf)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ clear_bit(epf->func_no, &epc->function_num_map);
list_del(&epf->list);
epf->epc = NULL;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -539,20 +540,31 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
*/
void pci_epc_linkup(struct pci_epc *epc)
{
- unsigned long flags;
- struct pci_epf *epf;
-
if (!epc || IS_ERR(epc))
return;
- spin_lock_irqsave(&epc->lock, flags);
- list_for_each_entry(epf, &epc->pci_epf, list)
- pci_epf_linkup(epf);
- spin_unlock_irqrestore(&epc->lock, flags);
+ atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_init_notify() - Notify the EPF device that the EPC device's
+ * core initialization is complete
+ * @epc: the EPC device whose core initialization is complete
+ *
+ * Invoke to notify the EPF device that the EPC device's core
+ * initialization is complete.
+ */
+void pci_epc_init_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_init_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
@@ -610,8 +622,9 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
goto err_ret;
}
- spin_lock_init(&epc->lock);
+ mutex_init(&epc->lock);
INIT_LIST_HEAD(&epc->pci_epf);
+ ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
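
On the receiving side, an endpoint function driver now registers a
single notifier_block (as pci-epf-test does in its bind path above) and
dispatches on the event code. A minimal sketch; the handler and helper
names are illustrative, not taken from the patch:

	static int pci_epf_example_notifier(struct notifier_block *nb,
					    unsigned long val, void *data)
	{
		struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

		switch (val) {
		case CORE_INIT:
			/* Controller registers are usable: write the config
			 * space header, set BARs, configure MSI/MSI-X. */
			if (pci_epf_example_core_init(epf))
				return NOTIFY_BAD;
			break;
		case LINK_UP:
			/* Link to the host is established: start work. */
			pci_epf_example_start(epf);
			break;
		default:
			return NOTIFY_BAD;
		}

		return NOTIFY_OK;
	}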
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index d2b174ce15de..abfac1109a13 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
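
Because mem->lock now guards both the find and the release, two
function drivers sharing one controller can carve outbound windows
concurrently without corrupting the bitmap. A usage sketch of the
allocator pair; the window size and host_pci_addr are illustrative:

	phys_addr_t phys;
	void __iomem *virt;
	int ret;

	/* Reserve an outbound window; safe against concurrent callers. */
	virt = pci_epc_mem_alloc_addr(epc, &phys, SZ_64K);
	if (!virt)
		return -ENOMEM;

	/* Map a host address into the window, do the access, tear down. */
	ret = pci_epc_map_addr(epc, epf->func_no, phys, host_pci_addr, SZ_64K);
	if (!ret) {
		/* ... read/write through virt ... */
		pci_epc_unmap_addr(epc, epf->func_no, phys);
	}
	pci_epc_mem_free_addr(epc, phys, virt, SZ_64K);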
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index fb1306de8f40..244e00f48c5c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,26 +21,6 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
- * pci_epf_linkup() - Notify the function driver that EPC device has
- * established a connection with the Root Complex.
- * @epf: the EPF device bound to the EPC device which has established
- * the connection with the host
- *
- * Invoke to notify the function driver that EPC device has established
- * a connection with the Root Complex.
- */
-void pci_epf_linkup(struct pci_epf *epf)
-{
- if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
- return;
- }
-
- epf->driver->ops->linkup(epf);
-}
-EXPORT_SYMBOL_GPL(pci_epf_linkup);
-
-/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -55,7 +35,9 @@ void pci_epf_unbind(struct pci_epf *epf)
return;
}
+ mutex_lock(&epf->lock);
epf->driver->ops->unbind(epf);
+ mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
EXPORT_SYMBOL_GPL(pci_epf_unbind);
@@ -69,6 +51,8 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ int ret;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return -EINVAL;
@@ -77,7 +61,11 @@ int pci_epf_bind(struct pci_epf *epf)
if (!try_module_get(epf->driver->owner))
return -EAGAIN;
- return epf->driver->ops->bind(epf);
+ mutex_lock(&epf->lock);
+ ret = epf->driver->ops->bind(epf);
+ mutex_unlock(&epf->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
@@ -99,6 +87,7 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
epf->bar[bar].phys_addr);
epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].addr = NULL;
epf->bar[bar].size = 0;
epf->bar[bar].barno = 0;
epf->bar[bar].flags = 0;
@@ -135,6 +124,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].addr = space;
epf->bar[bar].size = size;
epf->bar[bar].barno = bar;
epf->bar[bar].flags |= upper_32_bits(size) ?
@@ -214,7 +204,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (!driver->ops)
return -EINVAL;
- if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+ if (!driver->ops->bind || !driver->ops->unbind)
return -EINVAL;
driver->driver.bus = &pci_epf_bus_type;
@@ -272,6 +262,7 @@ struct pci_epf *pci_epf_create(const char *name)
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
dev->type = &pci_epf_type;
+ mutex_init(&epf->lock);
ret = dev_set_name(dev, "%s", name);
if (ret) {