author     Marek Szyprowski <m.szyprowski@samsung.com>    2014-10-10 00:26:47 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-10 04:25:53 +0200
commit     f7426b983a6a353cf21e5733e84458219c4a817e (patch)
tree       c263c1bb7537597b6ca67e2ebed2d1b4a03d6ce1 /mm
parent     arm64: add atomic pool for non-coherent and CMA allocations (diff)
mm: cma: adjust address limit to avoid hitting low/high memory boundary
Russell King recently noticed that limiting the default CMA region to low memory on the ARM architecture causes serious memory management issues on machines with a lot of memory (most of which is only available as high memory). More information can be found in the following thread: http://thread.gmane.org/gmane.linux.ports.arm.kernel/348441/

These two patches remove this limit, letting the kernel place the default CMA region in high memory when possible (i.e. when there is enough high memory available and the architecture-specific DMA limit fits). This should solve the strange OOM issues seen on systems with lots of RAM (i.e. >1GiB) and a large (>256M) CMA area.

This patch (of 2):

Automatically allocated regions should not cross the low/high memory boundary, because such regions cannot later be correctly initialized: they would span two memory zones. This patch adds a check for this case and simple code that moves the region to low memory if the automatically selected address would not fit completely into high memory.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Daniel Drake <drake@endlessm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
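As a rough illustration of the limit adjustment made in the hunk below, here is a minimal standalone C sketch of the same decision. The helper name clamp_cma_limit, the userspace typedef for phys_addr_t, and the example values in main() are invented for illustration only; they are not part of the kernel patch.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */

/*
 * Illustrative sketch only: return the (possibly lowered) allocation limit so
 * that an automatically placed region of 'size' bytes cannot straddle the
 * low/high memory boundary at 'highmem_start'.
 */
static phys_addr_t clamp_cma_limit(phys_addr_t limit, phys_addr_t size,
				   bool fixed, phys_addr_t memblock_end,
				   phys_addr_t highmem_start)
{
	/*
	 * No usable limit (0 or beyond the end of DRAM): a top-of-memory
	 * placement of 'size' bytes would cross highmem_start, so pull the
	 * limit down to the start of high memory.
	 */
	if ((limit == 0 || limit > memblock_end) &&
	    memblock_end > highmem_start &&
	    memblock_end - size < highmem_start)
		return highmem_start;

	/*
	 * Non-fixed region with a limit just above highmem_start: placing
	 * 'size' bytes below that limit would also cross the boundary.
	 */
	if (!fixed && limit > highmem_start && limit - size < highmem_start)
		return highmem_start;

	return limit;	/* no adjustment needed */
}

int main(void)
{
	/* Example: 1 GiB of RAM, high memory starts at 768 MiB, 512 MiB CMA. */
	phys_addr_t limit = clamp_cma_limit(0, 512ULL << 20, false,
					    1ULL << 30, 768ULL << 20);

	/* Prints 0x30000000 (768 MiB): the region is forced below highmem. */
	printf("adjusted limit: %#llx\n", (unsigned long long)limit);
	return 0;
}

With these invented example values, a 512 MiB region placed at the top of 1 GiB of RAM would span the 768 MiB boundary, so the limit is clamped to highmem_start and the region ends up entirely in low memory.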
Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c  21
1 file changed, 21 insertions, 0 deletions
diff --git a/mm/cma.c b/mm/cma.c
index c17751c0dcaf..474c644a0dc6 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/log2.h>
 #include <linux/cma.h>
+#include <linux/highmem.h>
 
 struct cma {
 	unsigned long base_pfn;
@@ -163,6 +164,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
 			bool fixed, struct cma **res_cma)
 {
 	struct cma *cma;
+	phys_addr_t memblock_end = memblock_end_of_DRAM();
+	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
@@ -196,6 +199,24 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
 		return -EINVAL;
 
+	/*
+	 * adjust limit to avoid crossing low/high memory boundary for
+	 * automatically allocated regions
+	 */
+	if (((limit == 0 || limit > memblock_end) &&
+	     (memblock_end - size < highmem_start &&
+	      memblock_end > highmem_start)) ||
+	    (!fixed && limit > highmem_start && limit - size < highmem_start)) {
+		limit = highmem_start;
+	}
+
+	if (fixed && base < highmem_start && base+size > highmem_start) {
+		ret = -EINVAL;
+		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
+			(unsigned long)base, (unsigned long)highmem_start);
+		goto err;
+	}
+
 	/* Reserve memory */
 	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||