author    Linus Torvalds <torvalds@linux-foundation.org>  2014-09-13 02:45:27 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-09-13 02:45:27 +0200
commit    fc486b03cae382601b366ab460b05e1a01bf69cd (patch)
tree      4b7bcb5ddebfc24662bad6eda9b6f3c9a515df10 /arch/arm
parent    Merge tag 'char-misc-3.17-rc5' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent    xen/arm: remove mach_to_phys rbtree (diff)
Merge tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen ARM bugfix from Stefano Stabellini:
 "The patches fix the "xen_add_mach_to_phys_entry: cannot add" bug that
  has been affecting xen on arm and arm64 guests since 3.16. They
  require a few hypervisor side changes that just went in xen-unstable.

  A couple of days ago David sent out a pull request with a few other
  Xen fixes (it is already in master). Sorry we didn't synchronize
  better among us."

* tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/arm: remove mach_to_phys rbtree
  xen/arm: reimplement xen_dma_unmap_page & friends
  xen/arm: introduce XENFEAT_grant_map_identity
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h |  25
-rw-r--r--  arch/arm/include/asm/xen/page.h          |   9
-rw-r--r--  arch/arm/xen/Makefile                    |   2
-rw-r--r--  arch/arm/xen/enlighten.c                 |   6
-rw-r--r--  arch/arm/xen/mm32.c                      | 202
-rw-r--r--  arch/arm/xen/p2m.c                       |  66
6 files changed, 217 insertions(+), 93 deletions(-)
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
         __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
-{
-        if (__generic_dma_ops(hwdev)->unmap_page)
-                __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+                struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-                __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (__generic_dma_ops(hwdev)->sync_single_for_device)
-                __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
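
The hunk above demotes three static inline wrappers to plain declarations: their bodies move out of line into the new arch/arm/xen/mm32.c, where they can do real cache maintenance on buffers that have no struct page. Each wrapper keeps the same shape, a guard-then-delegate around the generic dma_map_ops hook. A compilable userspace sketch of that pattern, using simplified stand-in types rather than the real kernel API:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's dma_map_ops table. */
struct dma_map_ops {
        void (*unmap_page)(void *dev, unsigned long handle, size_t size);
};

static struct dma_map_ops generic_ops;  /* hook deliberately left NULL */

/* Mirrors the moved wrappers: test the hook, then delegate. */
static void xen_unmap_page_sketch(void *dev, unsigned long handle, size_t size)
{
        if (!generic_ops.unmap_page)
                return;  /* the arch provides no hook: nothing to do */
        generic_ops.unmap_page(dev, handle, size);
}

int main(void)
{
        xen_unmap_page_sketch(NULL, 0x1000, 4096);  /* safe no-op */
        printf("guarded delegate returned safely\n");
        return 0;
}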
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY (~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-        unsigned long pfn;
-
-        if (phys_to_mach.rb_node != NULL) {
-                pfn = __mfn_to_pfn(mfn);
-                if (pfn != INVALID_P2M_ENTRY)
-                        return pfn;
-        }
-
         return mfn;
 }
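
With the reverse rbtree gone, translation is one-directional: pfn_to_mfn() still consults the phys_to_mach tree via __pfn_to_mfn(), while mfn_to_pfn() now simply returns its argument, since an auto-translated guest can treat the two as identical outside of grant mappings. A compilable userspace sketch of that asymmetry; the one-entry lookup table and all values are invented for illustration:

#include <stdio.h>

#define INVALID_P2M_ENTRY (~0UL)

/* Stand-in for __pfn_to_mfn()'s rbtree search. */
static unsigned long lookup_pfn_to_mfn(unsigned long pfn)
{
        if (pfn == 0x100)       /* pretend one grant mapping exists */
                return 0x8100;
        return INVALID_P2M_ENTRY;
}

static unsigned long pfn_to_mfn_sketch(unsigned long pfn)
{
        unsigned long mfn = lookup_pfn_to_mfn(pfn);

        return (mfn != INVALID_P2M_ENTRY) ? mfn : pfn;  /* identity fallback */
}

static unsigned long mfn_to_pfn_sketch(unsigned long mfn)
{
        return mfn;     /* after this patch: always the identity */
}

int main(void)
{
        printf("pfn 0x100 -> mfn %#lx\n", pfn_to_mfn_sketch(0x100));    /* 0x8100 */
        printf("pfn 0x200 -> mfn %#lx\n", pfn_to_mfn_sketch(0x200));    /* 0x200  */
        printf("mfn 0x8100 -> pfn %#lx\n", mfn_to_pfn_sketch(0x8100));  /* 0x8100 */
        return 0;
}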
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
         xen_domain_type = XEN_HVM_DOMAIN;
 
         xen_setup_features();
+
+        if (!xen_feature(XENFEAT_grant_map_identity)) {
+                pr_warn("Please upgrade your Xen.\n"
+                        "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+        }
+
         if (xen_feature(XENFEAT_dom0))
                 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
         else
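
The new guard warns when the hypervisor lacks XENFEAT_grant_map_identity, because the fallback path in mm32.c below depends on it. Xen advertises features as a per-guest bitmap that the kernel fetches at boot via xen_setup_features(). A minimal compilable model of such a feature test; the flag positions here are invented (the real values live in xen/include/public/features.h):

#include <stdio.h>

enum { FEAT_DOM0_SKETCH = 0, FEAT_GRANT_MAP_IDENTITY_SKETCH = 1 };

static unsigned long features_sketch;   /* would be filled from the hypervisor */

static int has_feature_sketch(int flag)
{
        return (int)((features_sketch >> flag) & 1UL);
}

int main(void)
{
        features_sketch = 1UL << FEAT_DOM0_SKETCH;  /* identity flag absent */

        if (!has_feature_sketch(FEAT_GRANT_MAP_IDENTITY_SKETCH))
                printf("Please upgrade your Xen.\n");
        return 0;
}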
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+        struct page *page;
+        unsigned long virt;
+        pmd_t *pmdp;
+        pte_t *ptep;
+
+        if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+                return 0;
+
+        page = alloc_page(GFP_KERNEL);
+        if (page == NULL) {
+                pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+                return -ENOMEM;
+        }
+
+        virt = (unsigned long)__va(page_to_phys(page));
+        pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+        ptep = pte_offset_kernel(pmdp, virt);
+
+        per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+        per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+        return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+        int cpu = (long)hcpu;
+        switch (action) {
+        case CPU_UP_PREPARE:
+                if (alloc_xen_mm32_scratch_page(cpu))
+                        return NOTIFY_BAD;
+                break;
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+        .notifier_call = xen_mm32_cpu_notify,
+};
+
+static void *xen_mm32_remap_page(dma_addr_t handle)
+{
+        unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+        pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+        *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+        local_flush_tlb_kernel_page(virt);
+
+        return (void *)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+        put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+                            size_t size, enum dma_data_direction dir,
+                            void (*op)(const void *, size_t, int))
+{
+        unsigned long pfn;
+        size_t left = size;
+
+        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+        offset %= PAGE_SIZE;
+
+        do {
+                size_t len = left;
+                void *vaddr;
+
+                if (!pfn_valid(pfn))
+                {
+                        /* Cannot map the page, we don't know its physical address.
+                         * Return and hope for the best. */
+                        if (!xen_feature(XENFEAT_grant_map_identity))
+                                return;
+                        vaddr = xen_mm32_remap_page(handle) + offset;
+                        op(vaddr, len, dir);
+                        xen_mm32_unmap(vaddr - offset);
+                } else {
+                        struct page *page = pfn_to_page(pfn);
+
+                        if (PageHighMem(page)) {
+                                if (len + offset > PAGE_SIZE)
+                                        len = PAGE_SIZE - offset;
+
+                                if (cache_is_vipt_nonaliasing()) {
+                                        vaddr = kmap_atomic(page);
+                                        op(vaddr + offset, len, dir);
+                                        kunmap_atomic(vaddr);
+                                } else {
+                                        vaddr = kmap_high_get(page);
+                                        if (vaddr) {
+                                                op(vaddr + offset, len, dir);
+                                                kunmap_high(page);
+                                        }
+                                }
+                        } else {
+                                vaddr = page_address(page) + offset;
+                                op(vaddr, len, dir);
+                        }
+                }
+
+                offset = 0;
+                pfn++;
+                left -= len;
+        } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        /* Cannot use __dma_page_dev_to_cpu because we don't have a
+         * struct page for handle. */
+
+        if (dir != DMA_TO_DEVICE)
+                outer_inv_range(handle, handle + size);
+
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+        if (dir == DMA_FROM_DEVICE) {
+                outer_inv_range(handle, handle + size);
+        } else {
+                outer_clean_range(handle, handle + size);
+        }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+
+{
+        if (!__generic_dma_ops(hwdev)->unmap_page)
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+                return;
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+                return;
+        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+        int cpu;
+
+        if (!xen_initial_domain())
+                return 0;
+
+        register_cpu_notifier(&xen_mm32_cpu_notifier);
+        get_online_cpus();
+        for_each_online_cpu(cpu) {
+                if (alloc_xen_mm32_scratch_page(cpu)) {
+                        put_online_cpus();
+                        unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+                        return -ENOMEM;
+                }
+        }
+        put_online_cpus();
+
+        return 0;
+}
+arch_initcall(xen_mm32_init);
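
The core of the new file is dma_cache_maint(): it walks a bus-address range page by page and picks, per page, one of three ways to obtain a virtual address for the cache op: the per-CPU scratch PTE when there is no struct page (valid only because XENFEAT_grant_map_identity makes the machine address double as a guest physical address), kmap for highmem pages, or the direct lowmem mapping. A compilable userspace sketch of just the walk; names carrying a _SKETCH/_sketch suffix are inventions, and unlike the lowmem branch above (which may pass a virtually contiguous multi-page run in one call) this version clamps every chunk to a page boundary:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_SKETCH  4096UL
#define PAGE_SHIFT_SKETCH 12

/* Stand-in for the dmac_map_area/dmac_unmap_area callback. */
static void maint_op_sketch(unsigned long pfn, unsigned long off, size_t len)
{
        printf("maint: pfn=%lu offset=%lu len=%zu\n", pfn, off, len);
}

static void cache_maint_sketch(unsigned long handle, unsigned long offset,
                               size_t size)
{
        unsigned long pfn = (handle >> PAGE_SHIFT_SKETCH) + offset / PAGE_SIZE_SKETCH;
        size_t left = size;

        offset %= PAGE_SIZE_SKETCH;
        do {
                size_t len = left;

                /* Clamp to the end of the current page, as the highmem
                 * branch of dma_cache_maint() does. */
                if (len + offset > PAGE_SIZE_SKETCH)
                        len = PAGE_SIZE_SKETCH - offset;

                maint_op_sketch(pfn, offset, len);

                offset = 0;     /* only the first page starts mid-page */
                pfn++;
                left -= len;
        } while (left);
}

int main(void)
{
        cache_maint_sketch(0x10000, 4000, 10000);  /* range spanning four pages */
        return 0;
}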
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf4427817..054857776254 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
         unsigned long pfn;
         unsigned long mfn;
         unsigned long nr_pages;
-        struct rb_node rbnode_mach;
         struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
                 parent = *link;
                 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-                if (new->mfn == entry->mfn)
-                        goto err_out;
                 if (new->pfn == entry->pfn)
                         goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-        struct rb_node **link = &mach_to_phys.rb_node;
-        struct rb_node *parent = NULL;
-        struct xen_p2m_entry *entry;
-        int rc = 0;
-
-        while (*link) {
-                parent = *link;
-                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-                if (new->mfn == entry->mfn)
-                        goto err_out;
-                if (new->pfn == entry->pfn)
-                        goto err_out;
-
-                if (new->mfn < entry->mfn)
-                        link = &(*link)->rb_left;
-                else
-                        link = &(*link)->rb_right;
-        }
-        rb_link_node(&new->rbnode_mach, parent, link);
-        rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-        goto out;
-
-err_out:
-        rc = -EINVAL;
-        pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-                __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-        return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-        struct rb_node *n = mach_to_phys.rb_node;
-        struct xen_p2m_entry *entry;
-        unsigned long irqflags;
-
-        read_lock_irqsave(&p2m_lock, irqflags);
-        while (n) {
-                entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-                if (entry->mfn <= mfn &&
-                    entry->mfn + entry->nr_pages > mfn) {
-                        read_unlock_irqrestore(&p2m_lock, irqflags);
-                        return entry->pfn + (mfn - entry->mfn);
-                }
-                if (mfn < entry->mfn)
-                        n = n->rb_left;
-                else
-                        n = n->rb_right;
-        }
-        read_unlock_irqrestore(&p2m_lock, irqflags);
-
-        return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                             struct gnttab_map_grant_ref *kmap_ops,
                             struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
                 p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                 if (p2m_entry->pfn <= pfn &&
                     p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-                        rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                         rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                         write_unlock_irqrestore(&p2m_lock, irqflags);
                         kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
         p2m_entry->mfn = mfn;
 
         write_lock_irqsave(&p2m_lock, irqflags);
-        if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-            (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+        if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
                 write_unlock_irqrestore(&p2m_lock, irqflags);
                 return false;
         }
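
After the patch, the only search structure left is phys_to_mach, which __pfn_to_mfn() queries by range containment: each xen_p2m_entry covers nr_pages frames, so the walk tests pfn against [entry->pfn, entry->pfn + entry->nr_pages) rather than for equality. A self-contained, compilable sketch of that containment search, using a sorted array in place of the rbtree; the table contents are made up:

#include <stdio.h>

#define INVALID_P2M_ENTRY (~0UL)

struct p2m_entry_sketch { unsigned long pfn, mfn, nr_pages; };

/* Two invented ranges, sorted by pfn as the rbtree would keep them. */
static const struct p2m_entry_sketch map_sketch[] = {
        { 0x100, 0x8100, 16 },
        { 0x200, 0x9000, 4 },
};

static unsigned long pfn_to_mfn_lookup_sketch(unsigned long pfn)
{
        int lo = 0, hi = (int)(sizeof(map_sketch) / sizeof(map_sketch[0])) - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;
                const struct p2m_entry_sketch *e = &map_sketch[mid];

                if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
                        return e->mfn + (pfn - e->pfn);  /* offset within range */
                if (pfn < e->pfn)
                        hi = mid - 1;
                else
                        lo = mid + 1;
        }
        return INVALID_P2M_ENTRY;
}

int main(void)
{
        printf("pfn 0x105 -> %#lx\n", pfn_to_mfn_lookup_sketch(0x105)); /* 0x8105 */
        printf("pfn 0x300 -> %#lx\n", pfn_to_mfn_lookup_sketch(0x300)); /* sentinel */
        return 0;
}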