author     Christoph Hellwig <hch@lst.de>                  2021-03-01 08:44:32 +0100
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2021-03-17 01:40:54 +0100
commit     a98f565462f0fca9096e8f53933364dc2a74bc90 (patch)
tree       37f967e3abfc986e7af93bcd091c83261a338222
parent     swiotlb: lift the double initialization protection from xen-swiotlb (diff)
xen-swiotlb: split xen_swiotlb_init
Split xen_swiotlb_init into a normal and an early case. That makes both much simpler and more readable, and also allows marking the early code as __init and x86-only.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--   arch/arm/xen/mm.c                 2
-rw-r--r--   arch/x86/xen/pci-swiotlb-xen.c    4
-rw-r--r--   drivers/xen/swiotlb-xen.c       124
-rw-r--r--   include/xen/swiotlb-xen.h         3
4 files changed, 75 insertions(+), 58 deletions(-)
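
Before reading the hunks, here is a minimal sketch of the calling convention the split establishes, distilled from the callers changed below. The wrapper names (example_early_boot_setup, example_late_setup) are invented purely for illustration; the real callers are pci_xen_swiotlb_init, pci_xen_swiotlb_init_late and xen_mm_init as patched below.

	#include <linux/init.h>
	#include <xen/swiotlb-xen.h>

	/*
	 * Early x86 boot (cf. pci_xen_swiotlb_init below): the early variant
	 * returns void, is __init, and panics internally if the bounce buffer
	 * cannot be set up.  This wrapper name is hypothetical.
	 */
	void __init example_early_boot_setup(void)
	{
		xen_swiotlb_init_early();
		/* the real caller then installs &xen_swiotlb_dma_ops as dma_ops */
	}

	/*
	 * Any later or non-x86 caller (xen_mm_init on Arm,
	 * pci_xen_swiotlb_init_late on x86): the normal variant returns an
	 * errno and can fail gracefully.  This wrapper name is hypothetical.
	 */
	int example_late_setup(void)
	{
		return xen_swiotlb_init();
	}
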
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 467fa225c3d0..aae950cd053f 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -140,7 +140,7 @@ static int __init xen_mm_init(void)
struct gnttab_cache_flush cflush;
if (!xen_initial_domain())
return 0;
- xen_swiotlb_init(1, false);
+ xen_swiotlb_init();
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 19ae3e4fe4e9..54f9aa7e8457 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void)
void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
- xen_swiotlb_init(1, true /* early */);
+ xen_swiotlb_init_early();
dma_ops = &xen_swiotlb_dma_ops;
#ifdef CONFIG_PCI
@@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void)
if (xen_swiotlb)
return 0;
- rc = xen_swiotlb_init(1, false /* late */);
+ rc = xen_swiotlb_init();
if (rc)
return rc;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 060eeb056486..00adeb95ebb9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -156,96 +156,112 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-int __ref xen_swiotlb_init(int verbose, bool early)
+int __ref xen_swiotlb_init(void)
{
- unsigned long bytes, order;
- int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
+ unsigned long nslabs, bytes, order;
unsigned int repeat = 3;
+ int rc = -ENOMEM;
char *start;
- unsigned long nslabs;
nslabs = swiotlb_nr_tbl();
-retry:
if (!nslabs)
nslabs = DEFAULT_NSLABS;
+retry:
+ m_ret = XEN_SWIOTLB_ENOMEM;
bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
/*
* Get IO TLB memory from any location.
*/
- if (early) {
- start = memblock_alloc(PAGE_ALIGN(bytes),
- PAGE_SIZE);
- if (!start)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
- } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- start = (void *)xen_get_swiotlb_free_pages(order);
- if (start)
- break;
- order--;
- }
- if (order != get_order(bytes)) {
- pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
- (PAGE_SIZE << order) >> 20);
- nslabs = SLABS_PER_PAGE << order;
- bytes = nslabs << IO_TLB_SHIFT;
- }
+ while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+ start = (void *)xen_get_swiotlb_free_pages(order);
+ if (start)
+ break;
+ order--;
}
- if (!start) {
- m_ret = XEN_SWIOTLB_ENOMEM;
+ if (!start)
goto error;
+ if (order != get_order(bytes)) {
+ pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
+ (PAGE_SIZE << order) >> 20);
+ nslabs = SLABS_PER_PAGE << order;
+ bytes = nslabs << IO_TLB_SHIFT;
}
+
/*
* And replace that memory with pages under 4GB.
*/
- rc = xen_swiotlb_fixup(start,
- bytes,
- nslabs);
+ rc = xen_swiotlb_fixup(start, bytes, nslabs);
if (rc) {
- if (early)
- memblock_free(__pa(start),
- PAGE_ALIGN(bytes));
- else {
- free_pages((unsigned long)start, order);
- start = NULL;
- }
+ free_pages((unsigned long)start, order);
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
- if (early) {
- if (swiotlb_init_with_tbl(start, nslabs,
- verbose))
- panic("Cannot allocate SWIOTLB buffer");
- rc = 0;
- } else
- rc = swiotlb_late_init_with_tbl(start, nslabs);
-
- if (!rc)
- swiotlb_set_max_segment(PAGE_SIZE);
-
- return rc;
+ rc = swiotlb_late_init_with_tbl(start, nslabs);
+ if (rc)
+ return rc;
+ swiotlb_set_max_segment(PAGE_SIZE);
+ return 0;
error:
if (repeat--) {
- nslabs = max(1024UL, /* Min is 2MB */
- (nslabs >> 1));
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
pr_info("Lowering to %luMB\n",
(nslabs << IO_TLB_SHIFT) >> 20);
goto retry;
}
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- if (early)
- panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
- else
- free_pages((unsigned long)start, order);
+ free_pages((unsigned long)start, order);
return rc;
}
+#ifdef CONFIG_X86
+void __init xen_swiotlb_init_early(void)
+{
+ unsigned long nslabs, bytes;
+ unsigned int repeat = 3;
+ char *start;
+ int rc;
+
+ nslabs = swiotlb_nr_tbl();
+ if (!nslabs)
+ nslabs = DEFAULT_NSLABS;
+retry:
+ /*
+ * Get IO TLB memory from any location.
+ */
+ bytes = nslabs << IO_TLB_SHIFT;
+ start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+ if (!start)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+
+ /*
+ * And replace that memory with pages under 4GB.
+ */
+ rc = xen_swiotlb_fixup(start, bytes, nslabs);
+ if (rc) {
+ memblock_free(__pa(start), PAGE_ALIGN(bytes));
+ if (repeat--) {
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
+ pr_info("Lowering to %luMB\n",
+ (nslabs << IO_TLB_SHIFT) >> 20);
+ goto retry;
+ }
+ panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
+ }
+
+ if (swiotlb_init_with_tbl(start, nslabs, false))
+ panic("Cannot allocate SWIOTLB buffer");
+ swiotlb_set_max_segment(PAGE_SIZE);
+}
+#endif /* CONFIG_X86 */
+
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d5eaf9d682b8..6206b1ec9916 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -9,7 +9,8 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir);
-extern int xen_swiotlb_init(int verbose, bool early);
+int xen_swiotlb_init(void);
+void __init xen_swiotlb_init_early(void);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
#endif /* __LINUX_SWIOTLB_XEN_H */
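
As an aside on the sizing constants visible in the patch (DEFAULT_NSLABS, the nslabs << IO_TLB_SHIFT conversions, and the 1024-slab floor behind the "Min is 2MB" comment), the stand-alone sketch below works through the arithmetic. The values IO_TLB_SHIFT == 11 (2 KiB slabs) and IO_TLB_SEGSIZE == 128 are assumptions about the swiotlb headers of this period, not part of this commit, and the real retry loop is additionally capped at three attempts.

	#include <stdio.h>

	#define IO_TLB_SHIFT	11	/* assumed: one slab is 2 KiB */
	#define IO_TLB_SEGSIZE	128	/* assumed: slabs per segment */
	#define SZ_64M		(64UL << 20)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		/* DEFAULT_NSLABS from the patch: 64 MiB worth of slabs. */
		unsigned long nslabs = ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);

		printf("default: %lu slabs = %lu MiB\n",
		       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);	/* 32768 slabs, 64 MiB */

		/*
		 * The retry path halves nslabs on failure with a floor of 1024
		 * slabs; 1024 << 11 is 2 MiB, matching the "Min is 2MB" comment.
		 */
		while (nslabs > 1024) {
			nslabs = (nslabs >> 1) > 1024 ? nslabs >> 1 : 1024;
			printf("lowering to %lu MiB\n",
			       (nslabs << IO_TLB_SHIFT) >> 20);	/* 32, 16, 8, 4, 2 */
		}
		return 0;
	}
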