Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            |    9
-rw-r--r--  mm/Kconfig.debug      |   26
-rw-r--r--  mm/Makefile           |    5
-rw-r--r--  mm/allocpercpu.c      |   34
-rw-r--r--  mm/backing-dev.c      |   26
-rw-r--r--  mm/bootmem.c          |   35
-rw-r--r--  mm/debug-pagealloc.c  |  129
-rw-r--r--  mm/filemap.c          |   28
-rw-r--r--  mm/filemap_xip.c      |    4
-rw-r--r--  mm/highmem.c          |  110
-rw-r--r--  mm/hugetlb.c          |    6
-rw-r--r--  mm/internal.h         |    8
-rw-r--r--  mm/memcontrol.c       |  687
-rw-r--r--  mm/memory.c           |   39
-rw-r--r--  mm/migrate.c          |   10
-rw-r--r--  mm/mmap.c             |    7
-rw-r--r--  mm/nommu.c            |   52
-rw-r--r--  mm/oom_kill.c         |   13
-rw-r--r--  mm/page-writeback.c   |   46
-rw-r--r--  mm/page_alloc.c       |   36
-rw-r--r--  mm/page_cgroup.c      |   37
-rw-r--r--  mm/pdflush.c          |    2
-rw-r--r--  mm/percpu.c           | 1326
-rw-r--r--  mm/readahead.c        |   65
-rw-r--r--  mm/shmem.c            |    5
-rw-r--r--  mm/slab.c             |    7
-rw-r--r--  mm/slob.c             |   45
-rw-r--r--  mm/slub.c             |   83
-rw-r--r--  mm/sparse.c           |    4
-rw-r--r--  mm/swap.c             |   27
-rw-r--r--  mm/truncate.c         |   10
-rw-r--r--  mm/util.c             |   30
-rw-r--r--  mm/vmalloc.c          |  116
-rw-r--r--  mm/vmscan.c           |  109
-rw-r--r--  mm/vmstat.c           |   18
35 files changed, 2598 insertions(+), 596 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index a5b77811fdf2..b53427ad30a3 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -206,7 +206,6 @@ config VIRT_TO_BUS
config UNEVICTABLE_LRU
bool "Add LRU list to track non-evictable pages"
default y
- depends on MMU
help
Keeps unevictable pages off of the active and inactive pageout
lists, so kswapd will not waste CPU time or have its balancing
@@ -214,5 +213,13 @@ config UNEVICTABLE_LRU
will use one page flag and increase the code size a little,
say Y unless you know what you are doing.
+config HAVE_MLOCK
+ bool
+ default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+ bool
+ default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
config MMU_NOTIFIER
bool
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
new file mode 100644
index 000000000000..bb01e298f260
--- /dev/null
+++ b/mm/Kconfig.debug
@@ -0,0 +1,26 @@
+config DEBUG_PAGEALLOC
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ depends on !HIBERNATION || !PPC && !SPARC
+ ---help---
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
+
+config WANT_PAGE_DEBUG_FLAGS
+ bool
+
+config PAGE_POISONING
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ depends on !HIBERNATION
+ select DEBUG_PAGEALLOC
+ select WANT_PAGE_DEBUG_FLAGS
+ help
+ Fill the pages with poison patterns after free_pages() and verify
+ the patterns before alloc_pages(). This results in a large slowdown,
+ but helps to find certain types of memory corruptions.
+
+ This option cannot be enabled together with hibernation. Otherwise, it
+ will report false memory corruption because the free pages are not
+ saved to the suspend image.
diff --git a/mm/Makefile b/mm/Makefile
index 72255be57f89..ec73c68b6015 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -24,12 +24,17 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
+ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+obj-$(CONFIG_SMP) += percpu.o
+else
obj-$(CONFIG_SMP) += allocpercpu.o
+endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 4297bc41bfd2..139d5b7b6621 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
/**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
* @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
*
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area. Percpu objects are populated with
+ * zeroed buffers.
*/
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
{
/*
* We allocate whole cache lines to avoid false sharing
*/
size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, gfp);
+ void *pdata = kzalloc(sz, GFP_KERNEL);
void *__pdata = __percpu_disguise(pdata);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+
if (unlikely(!pdata))
return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+ if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+ &cpu_possible_map)))
return __pdata;
kfree(pdata);
return NULL;
}
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
* @__pdata: object to clean up
*
* We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to be freed.
*/
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
{
if (unlikely(!__pdata))
return;
- __percpu_depopulate_mask(__pdata, &cpu_possible_map);
+ __percpu_depopulate_mask(__pdata, cpu_possible_mask);
kfree(__percpu_disguise(__pdata));
}
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
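These hunks keep the kzalloc-based fallback allocator but rename its entry points to __alloc_percpu()/free_percpu(), matching the dynamic allocator in the new mm/percpu.c, so callers see the same interface whichever one the Makefile picks. A minimal usage sketch follows; pkt_stats and the functions below are illustrative names, not part of this patch.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/errno.h>

/* illustrative per-CPU counter type */
struct pkt_stats {
	unsigned long rx;
	unsigned long tx;
};

static struct pkt_stats *stats;	/* handle returned by the percpu allocator */

static int __init pkt_stats_init(void)
{
	stats = alloc_percpu(struct pkt_stats);	/* zeroed copy on every CPU */
	if (!stats)
		return -ENOMEM;
	return 0;
}

static void pkt_stats_account_rx(void)
{
	/* pin to a CPU while touching that CPU's private copy; no lock needed */
	per_cpu_ptr(stats, get_cpu())->rx++;
	put_cpu();
}

static void pkt_stats_exit(void)
{
	free_percpu(stats);
}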
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8e8587444132..be68c956a660 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -2,11 +2,24 @@
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+}
+EXPORT_SYMBOL(default_unplug_io_fn);
+
+struct backing_dev_info default_backing_dev_info = {
+ .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
+ .state = 0,
+ .capabilities = BDI_CAP_MAP_COPY,
+ .unplug_io_fn = default_unplug_io_fn,
+};
+EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
@@ -166,9 +179,20 @@ static __init int bdi_class_init(void)
bdi_debug_init();
return 0;
}
-
postcore_initcall(bdi_class_init);
+static int __init default_bdi_init(void)
+{
+ int err;
+
+ err = bdi_init(&default_backing_dev_info);
+ if (!err)
+ bdi_register(&default_backing_dev_info, NULL, "default");
+
+ return err;
+}
+subsys_initcall(default_bdi_init);
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
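backing-dev.c now owns default_backing_dev_info and registers it from default_bdi_init(). A driver or filesystem that wants its own BDI would follow the same pattern; this is a hedged sketch using the illustrative name "mydev", not code from this patch.

#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static struct backing_dev_info mydev_bdi = {
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};

static int __init mydev_bdi_setup(void)
{
	int err;

	err = bdi_init(&mydev_bdi);
	if (err)
		return err;

	err = bdi_register(&mydev_bdi, NULL, "mydev");
	if (err)
		bdi_destroy(&mydev_bdi);	/* undo bdi_init() on failure */

	return err;
}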
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 51a0ccf61e0e..daf92713f7de 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -382,7 +382,6 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
* reserve_bootmem - mark a page range as usable
* @addr: starting address of the range
@@ -403,7 +402,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
unsigned long step)
@@ -429,8 +427,8 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
}
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
{
unsigned long fallback = 0;
unsigned long min, max, start, sidx, midx, step;
@@ -530,17 +528,34 @@ find_block:
return NULL;
}
+static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
+ unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+#ifdef CONFIG_HAVE_ARCH_BOOTMEM
+ bootmem_data_t *p_bdata;
+
+ p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
+ if (p_bdata)
+ return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+#endif
+ return NULL;
+}
+
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
bootmem_data_t *bdata;
+ void *region;
restart:
- list_for_each_entry(bdata, &bdata_list, list) {
- void *region;
+ region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
+ if (region)
+ return region;
+ list_for_each_entry(bdata, &bdata_list, list) {
if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
continue;
if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
@@ -618,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
{
void *ptr;
+ ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+ if (ptr)
+ return ptr;
+
ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
if (ptr)
return ptr;
@@ -674,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
{
void *ptr;
+ ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
+ if (ptr)
+ return ptr;
+
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
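alloc_arch_preferred_bootmem() gives architectures that define CONFIG_HAVE_ARCH_BOOTMEM a first shot at placing an allocation via bootmem_arch_preferred_node(). The hook's prototype is only implied by the call site above; the following is a purely hypothetical sketch of what an implementation could look like.

#include <linux/bootmem.h>
#include <linux/mmzone.h>

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
/*
 * Hypothetical arch hook (NOT part of this diff): return the bootmem
 * node this architecture prefers for the request, or NULL to fall back
 * to the generic bdata_list walk.  The prototype is inferred from the
 * call in alloc_arch_preferred_bootmem() above.
 */
bootmem_data_t * __init bootmem_arch_preferred_node(bootmem_data_t *bdata,
					unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	/* e.g. steer small, unconstrained allocations to the boot node */
	if (!goal && !limit && size <= PAGE_SIZE)
		return NODE_DATA(0)->bdata;

	return NULL;
}
#endif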
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
new file mode 100644
index 000000000000..a1e3324de2b5
--- /dev/null
+++ b/mm/debug-pagealloc.c
@@ -0,0 +1,129 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/page-debug-flags.h>
+#include <linux/poison.h>
+
+static inline void set_page_poison(struct page *page)
+{
+ __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static inline void clear_page_poison(struct page *page)
+{
+ __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static inline bool page_poison(struct page *page)
+{
+ return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static void poison_highpage(struct page *page)
+{
+ /*
+ * Page poisoning for highmem pages is not implemented.
+ *
+ * This can be called from interrupt context, so implementing it
+ * would need a new kmap_atomic slot dedicated to this use, with
+ * interrupt protection.
+ */
+}
+
+static void poison_page(struct page *page)
+{
+ void *addr;
+
+ if (PageHighMem(page)) {
+ poison_highpage(page);
+ return;
+ }
+ set_page_poison(page);
+ addr = page_address(page);
+ memset(addr, PAGE_POISON, PAGE_SIZE);
+}
+
+static void poison_pages(struct page *page, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ poison_page(page + i);
+}
+
+static bool single_bit_flip(unsigned char a, unsigned char b)
+{
+ unsigned char error = a ^ b;
+
+ return error && !(error & (error - 1));
+}
+
+static void check_poison_mem(unsigned char *mem, size_t bytes)
+{
+ unsigned char *start;
+ unsigned char *end;
+
+ for (start = mem; start < mem + bytes; start++) {
+ if (*start != PAGE_POISON)
+ break;
+ }
+ if (start == mem + bytes)
+ return;
+
+ for (end = mem + bytes - 1; end > start; end--) {
+ if (*end != PAGE_POISON)
+ break;
+ }
+
+ if (!printk_ratelimit())
+ return;
+ else if (start == end && single_bit_flip(*start, PAGE_POISON))
+ printk(KERN_ERR "pagealloc: single bit error\n");
+ else
+ printk(KERN_ERR "pagealloc: memory corruption\n");
+
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
+ end - start + 1, 1);
+ dump_stack();
+}
+
+static void unpoison_highpage(struct page *page)
+{
+ /*
+ * See comment in poison_highpage().
+ * Highmem pages should not be poisoned for now
+ */
+ BUG_ON(page_poison(page));
+}
+
+static void unpoison_page(struct page *page)
+{
+ if (PageHighMem(page)) {
+ unpoison_highpage(page);
+ return;
+ }
+ if (page_poison(page)) {
+ void *addr = page_address(page);
+
+ check_poison_mem(addr, PAGE_SIZE);
+ clear_page_poison(page);
+ }
+}
+
+static void unpoison_pages(struct page *page, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ unpoison_page(page + i);
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled)
+ return;
+
+ if (enable)
+ unpoison_pages(page, numpages);
+ else
+ poison_pages(page, numpages);
+}
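The corruption classifier above hinges on single_bit_flip(): XOR-ing the observed byte against PAGE_POISON leaves a 1 in every flipped bit position, and error & (error - 1) clears the lowest set bit, so the expression is non-zero only when exactly one bit differs. A standalone userspace illustration (0xaa matches PAGE_POISON from include/linux/poison.h; the test values are made up):

#include <stdio.h>

/* same test as mm/debug-pagealloc.c: true iff exactly one bit differs */
static int single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

int main(void)
{
	unsigned char poison = 0xaa;			/* PAGE_POISON pattern */

	printf("%d\n", single_bit_flip(0xab, poison));	/* 1: only bit 0 flipped */
	printf("%d\n", single_bit_flip(0xa9, poison));	/* 0: two bits differ    */
	printf("%d\n", single_bit_flip(poison, poison));/* 0: no corruption      */
	return 0;
}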
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..fc11974f2bee 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -565,6 +565,24 @@ void wait_on_page_bit(struct page *page, int bit_nr)
EXPORT_SYMBOL(wait_on_page_bit);
/**
+ * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
+ *
+ * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ */
+void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+{
+ wait_queue_head_t *q = page_waitqueue(page);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue(q, waiter);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_page_wait_queue);
+
+/**
* unlock_page - unlock a locked page
* @page: the page
*
@@ -1823,7 +1841,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
@@ -1851,8 +1869,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_inatomic_nocache(kaddr + offset,
- buf, bytes);
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1897,7 @@ size_t iov_iter_copy_from_user(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+ left = __copy_from_user(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -2464,6 +2481,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
* (presumably at page->private). If the release was successful, return `1'.
* Otherwise return zero.
*
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
* The @gfp_mask argument specifies whether I/O may be performed to release
* this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
*
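The new add_page_wait_queue(), exported above for the caching work hinted at by the PG_fscache comment, lets a caller attach its own waiter to a page's wait queue and get a callback when the page is woken (e.g. on unlock). A hedged sketch of how such a waiter could be wired up; page_monitor and its helpers are illustrative names, not code from this patch.

#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/wait.h>

struct page_monitor {
	wait_queue_t	wait;
	struct page	*page;
	bool		fired;
};

/* called by __wake_up() whenever the page's wait queue is woken */
static int page_monitor_wake(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct page_monitor *mon = container_of(wait, struct page_monitor, wait);

	mon->fired = true;	/* a real user would queue work here */
	return 0;		/* keep waking the other waiters */
}

static void watch_page(struct page_monitor *mon, struct page *page)
{
	mon->page = page;
	mon->fired = false;
	init_waitqueue_func_entry(&mon->wait, page_monitor_wake);
	add_page_wait_queue(page, &mon->wait);
}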
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 0c04615651b7..427dfe3ce78c 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping,
}
}
nr = nr - offset;
- if (nr > len)
- nr = len;
+ if (nr > len - copied)
+ nr = len - copied;
error = mapping->a_ops->get_xip_mem(mapping, index, 0,
&xip_mem, &xip_pfn);
diff --git a/mm/highmem.c b/mm/highmem.c
index b36b83b920ff..68eb1d9b63fa 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;
static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potentially useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap() spin_lock_irq(&kmap_lock)
+#define unlock_kmap() spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap() spin_lock(&kmap_lock)
+#define unlock_kmap() spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags) \
+ do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags) \
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
static void flush_all_zero_pkmaps(void)
{
int i;
@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
*/
void kmap_flush_unused(void)
{
- spin_lock(&kmap_lock);
+ lock_kmap();
flush_all_zero_pkmaps();
- spin_unlock(&kmap_lock);
+ unlock_kmap();
}
static inline unsigned long map_new_virtual(struct page *page)
@@ -145,10 +164,10 @@ start:
__set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
+ lock_kmap();
/* Somebody else might have mapped it while we slept */
if (page_address(page))
@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
*/
- spin_lock(&kmap_lock);
+ lock_kmap();
vaddr = (unsigned long)page_address(page);
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_high);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists. If and only if a non-NULL address is returned, a matching
+ * call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+ unsigned long vaddr, flags;
+
+ lock_kmap_any(flags);
+ vaddr = (unsigned long)page_address(page);
+ if (vaddr) {
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ }
+ unlock_kmap_any(flags);
+ return (void*) vaddr;
+}
+#endif
+
/**
 * kunmap_high - unmap a highmem page
* @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
*/
void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
+ unsigned long flags;
int need_wakeup;
- spin_lock(&kmap_lock);
+ lock_kmap_any(flags);
vaddr = (unsigned long)page_address(page);
BUG_ON(!vaddr);
nr = PKMAP_NR(vaddr);
@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
*/
need_wakeup = waitqueue_active(&pkmap_map_wait);
}
- spin_unlock(&kmap_lock);
+ unlock_kmap_any(flags);
/* do wake-up, if needed, race-free outside of the spin lock */
if (need_wakeup)
@@ -373,3 +422,48 @@ void __init page_address_init(void)
}
#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
+
+#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+
+void debug_kmap_atomic(enum km_type type)
+{
+ static unsigned warn_count = 10;
+
+ if (unlikely(warn_count == 0))
+ return;
+
+ if (unlikely(in_interrupt())) {
+ if (in_irq()) {
+ if (type != KM_IRQ0 && type != KM_IRQ1 &&
+ type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
+ type != KM_BOUNCE_READ) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ } else if (!irqs_disabled()) { /* softirq */
+ if (type != KM_IRQ0 && type != KM_IRQ1 &&
+ type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
+ type != KM_SKB_SUNRPC_DATA &&
+ type != KM_SKB_DATA_SOFTIRQ &&
+ type != KM_BOUNCE_READ) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ }
+ }
+
+ if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
+ type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+ if (!irqs_disabled()) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
+ if (irq_count() == 0 && !irqs_disabled()) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ }
+}
+
+#endif
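kmap_high_get() above exists for architectures (those defining ARCH_NEEDS_KMAP_HIGH_GET) that need to reuse an existing permanent kmap from atomic context, for example for cache maintenance before DMA. A minimal sketch of that pattern; arch_flush_cache_range() is a hypothetical stand-in for whatever cache operation the architecture actually needs.

#include <linux/highmem.h>
#include <linux/mm.h>

/* hypothetical arch-specific cache helper, named here only for the sketch */
extern void arch_flush_cache_range(unsigned long start, unsigned long end);

static void flush_highpage_if_kmapped(struct page *page)
{
	void *vaddr = kmap_high_get(page);	/* never sleeps, any context */

	if (!vaddr)
		return;		/* page has no permanent kmap: nothing to do */

	arch_flush_cache_range((unsigned long)vaddr,
			       (unsigned long)vaddr + PAGE_SIZE);
	kunmap_high(page);	/* drop the pin taken by kmap_high_get() */
}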
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 107da3d809a8..28c655ba9353 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -918,7 +918,7 @@ static void return_unused_surplus_pages(struct hstate *h,
* an instantiated the change should be committed via vma_commit_reservation.
* No action is required on failure.
*/
-static int vma_needs_reservation(struct hstate *h,
+static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -933,7 +933,7 @@ static int vma_needs_reservation(struct hstate *h,
return 1;
} else {
- int err;
+ long err;
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
@@ -969,7 +969,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
struct page *page;
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
- unsigned int chg;
+ long chg;
/*
* Processes that did not create the mapping will have no reserves and
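The hugetlb hunks widen chg and vma_needs_reservation()'s return type from (unsigned) int to long so that negative error returns survive: stored in an unsigned int, an errno such as -ENOMEM becomes a huge positive value and a later "chg < 0" check can never fire. A userspace toy (not kernel code; needs_reservation() is a made-up stand-in) showing the difference:

#include <stdio.h>

/* stand-in for vma_needs_reservation(): either a count or -ENOMEM */
static long needs_reservation(int fail)
{
	return fail ? -12 /* -ENOMEM */ : 1;
}

int main(void)
{
	unsigned int chg_old = needs_reservation(1);	/* pre-patch type  */
	long chg_new = needs_reservation(1);		/* post-patch type */

	/* with an unsigned chg the error check is dead code (gcc even warns) */
	printf("old: chg < 0 -> %d  (chg = %u)\n", chg_old < 0, chg_old);
	printf("new: chg < 0 -> %d  (chg = %ld)\n", chg_new < 0, chg_new);
	return 0;
}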
diff --git a/mm/internal.h b/mm/internal.h
index 478223b73a2a..987bb03fbdd8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
+#ifdef CONFIG_HAVE_MLOCK
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
+#endif
#ifdef CONFIG_UNEVICTABLE_LRU
/*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
}
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
}
}
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
/*
* Return the mem_map entry representing the 'offset' subpage within
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8e4be9cb2a6a..2fc6d6c48238 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -27,6 +27,7 @@
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -95,6 +96,15 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
return ret;
}
+static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
+{
+ s64 ret;
+
+ ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
+ ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
+ return ret;
+}
+
/*
* per-zone information in memory controller.
*/
@@ -154,9 +164,9 @@ struct mem_cgroup {
/*
* While reclaiming in a hiearchy, we cache the last child we
- * reclaimed from. Protected by hierarchy_mutex
+ * reclaimed from.
*/
- struct mem_cgroup *last_scanned_child;
+ int last_scanned_child;
/*
* Should the accounting and control be hierarchical, per subtree?
*/
@@ -247,7 +257,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
return mem_cgroup_zoneinfo(mem, nid, zid);
}
-static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
enum lru_list idx)
{
int nid, zid;
@@ -286,6 +296,9 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
struct mem_cgroup *mem = NULL;
+
+ if (!mm)
+ return NULL;
/*
* Because we have no locks, mm->owner's may be being moved to other
* cgroup. We use css_tryget() here even if this looks
@@ -308,6 +321,42 @@ static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
return css_is_removed(&mem->css);
}
+
+/*
+ * Call the callback function for every cgroup under the hierarchy tree.
+ */
+static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
+ int (*func)(struct mem_cgroup *, void *))
+{
+ int found, ret, nextid;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *mem;
+
+ if (!root->use_hierarchy)
+ return (*func)(root, data);
+
+ nextid = 1;
+ do {
+ ret = 0;
+ mem = NULL;
+
+ rcu_read_lock();
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
+ &found);
+ if (css && css_tryget(css))
+ mem = container_of(css, struct mem_cgroup, css);
+ rcu_read_unlock();
+
+ if (mem) {
+ ret = (*func)(mem, data);
+ css_put(&mem->css);
+ }
+ nextid = found + 1;
+ } while (!ret && css);
+
+ return ret;
+}
+
/*
* Following LRU functions are allowed to be used without PCG_LOCK.
* Operations are called by routine of global LRU independently from memcg.
@@ -441,31 +490,24 @@ void mem_cgroup_move_lists(struct page *page,
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
int ret;
+ struct mem_cgroup *curr = NULL;
task_lock(task);
- ret = task->mm && mm_match_cgroup(task->mm, mem);
+ rcu_read_lock();
+ curr = try_get_mem_cgroup_from_mm(task->mm);
+ rcu_read_unlock();
task_unlock(task);
+ if (!curr)
+ return 0;
+ if (curr->use_hierarchy)
+ ret = css_is_ancestor(&curr->css, &mem->css);
+ else
+ ret = (curr == mem);
+ css_put(&curr->css);
return ret;
}
/*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
- long total, rss;
-
- /*
- * usage is recorded in bytes. But, here, we assume the number of
- * physical pages can be represented by "long" on any arch.
- */
- total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
- rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
- return (int)((rss * 100L) / total);
-}
-
-/*
* prev_priority control...this will be used in memory reclaim path.
*/
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
@@ -501,8 +543,8 @@ static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_
unsigned long gb;
unsigned long inactive_ratio;
- inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
- active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+ inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
@@ -629,172 +671,202 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
-/*
- * This routine finds the DFS walk successor. This routine should be
- * called with hierarchy_mutex held
- */
-static struct mem_cgroup *
-__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
- struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
-
- curr_cgroup = curr->css.cgroup;
- root_cgroup = root_mem->css.cgroup;
+ if (do_swap_account) {
+ if (res_counter_check_under_limit(&mem->res) &&
+ res_counter_check_under_limit(&mem->memsw))
+ return true;
+ } else
+ if (res_counter_check_under_limit(&mem->res))
+ return true;
+ return false;
+}
- if (!list_empty(&curr_cgroup->children)) {
- /*
- * Walk down to children
- */
- cgroup = list_entry(curr_cgroup->children.next,
- struct cgroup, sibling);
- curr = mem_cgroup_from_cont(cgroup);
- goto done;
- }
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+ struct cgroup *cgrp = memcg->css.cgroup;
+ unsigned int swappiness;
-visit_parent:
- if (curr_cgroup == root_cgroup) {
- /* caller handles NULL case */
- curr = NULL;
- goto done;
- }
+ /* root ? */
+ if (cgrp->parent == NULL)
+ return vm_swappiness;
- /*
- * Goto next sibling
- */
- if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
- cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
- sibling);
- curr = mem_cgroup_from_cont(cgroup);
- goto done;
- }
+ spin_lock(&memcg->reclaim_param_lock);
+ swappiness = memcg->swappiness;
+ spin_unlock(&memcg->reclaim_param_lock);
- /*
- * Go up to next parent and next parent's sibling if need be
- */
- curr_cgroup = curr_cgroup->parent;
- goto visit_parent;
+ return swappiness;
+}
-done:
- return curr;
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+ int *val = data;
+ (*val)++;
+ return 0;
}
-/*
- * Visit the first child (need not be the first child as per the ordering
- * of the cgroup list, since we track last_scanned_child) of @mem and use
- * that to reclaim free pages from.
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
*/
-static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
- struct cgroup *cgroup;
- struct mem_cgroup *orig, *next;
- bool obsolete;
-
+ struct cgroup *task_cgrp;
+ struct cgroup *mem_cgrp;
/*
- * Scan all children under the mem_cgroup mem
+ * Need a buffer in BSS, can't rely on allocations. The code relies
+ * on the assumption that OOM is serialized for memory controller.
+ * If this assumption is broken, revisit this code.
*/
- mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+ static char memcg_name[PATH_MAX];
+ int ret;
+
+ if (!memcg)
+ return;
- orig = root_mem->last_scanned_child;
- obsolete = mem_cgroup_is_obsolete(orig);
- if (list_empty(&root_mem->css.cgroup->children)) {
+ rcu_read_lock();
+
+ mem_cgrp = memcg->css.cgroup;
+ task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+ ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
/*
- * root_mem might have children before and last_scanned_child
- * may point to one of them. We put it later.
+ * Unfortunately, we are unable to convert to a useful name,
+ * but we'll still print out the usage information
*/
- if (orig)
- VM_BUG_ON(!obsolete);
- next = NULL;
+ rcu_read_unlock();
goto done;
}
+ rcu_read_unlock();
- if (!orig || obsolete) {
- cgroup = list_first_entry(&root_mem->css.cgroup->children,
- struct cgroup, sibling);
- next = mem_cgroup_from_cont(cgroup);
- } else
- next = __mem_cgroup_get_next_node(orig, root_mem);
+ printk(KERN_INFO "Task in %s killed", memcg_name);
+ rcu_read_lock();
+ ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
+ rcu_read_unlock();
+ goto done;
+ }
+ rcu_read_unlock();
+
+ /*
+ * Continues from above, so we don't need a KERN_ level
+ */
+ printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:
- if (next)
- mem_cgroup_get(next);
- root_mem->last_scanned_child = next;
- if (orig)
- mem_cgroup_put(orig);
- mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
- return (next) ? next : root_mem;
+
+ printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+ res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->res, RES_FAILCNT));
+ printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+ "failcnt %llu\n",
+ res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * Returns 1 (self count) if there are no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
- if (do_swap_account) {
- if (res_counter_check_under_limit(&mem->res) &&
- res_counter_check_under_limit(&mem->memsw))
- return true;
- } else
- if (res_counter_check_under_limit(&mem->res))
- return true;
- return false;
+ int num = 0;
+ mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+ return num;
}
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
- struct cgroup *cgrp = memcg->css.cgroup;
- unsigned int swappiness;
+ struct mem_cgroup *ret = NULL;
+ struct cgroup_subsys_state *css;
+ int nextid, found;
- /* root ? */
- if (cgrp->parent == NULL)
- return vm_swappiness;
+ if (!root_mem->use_hierarchy) {
+ css_get(&root_mem->css);
+ ret = root_mem;
+ }
- spin_lock(&memcg->reclaim_param_lock);
- swappiness = memcg->swappiness;
- spin_unlock(&memcg->reclaim_param_lock);
+ while (!ret) {
+ rcu_read_lock();
+ nextid = root_mem->last_scanned_child + 1;
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+ &found);
+ if (css && css_tryget(css))
+ ret = container_of(css, struct mem_cgroup, css);
+
+ rcu_read_unlock();
+ /* Updates scanning parameter */
+ spin_lock(&root_mem->reclaim_param_lock);
+ if (!css) {
+ /* this means start scan from ID:1 */
+ root_mem->last_scanned_child = 0;
+ } else
+ root_mem->last_scanned_child = found;
+ spin_unlock(&root_mem->reclaim_param_lock);
+ }
- return swappiness;
+ return ret;
}
/*
- * Dance down the hierarchy if needed to reclaim memory. We remember the
- * last child we reclaimed from, so that we don't end up penalizing
- * one child extensively based on its position in the children list.
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
*
* root_mem is the original ancestor that we've been reclaim from.
+ *
+ * We give up and return to the caller when we visit root_mem twice.
+ * (other groups can be removed while we're walking.)
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
*/
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
- gfp_t gfp_mask, bool noswap)
-{
- struct mem_cgroup *next_mem;
- int ret = 0;
-
- /*
- * Reclaim unconditionally and don't check for return value.
- * We need to reclaim in the current group and down the tree.
- * One might think about checking for children before reclaiming,
- * but there might be left over accounting, even after children
- * have left.
- */
- ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
- get_swappiness(root_mem));
- if (mem_cgroup_check_under_limit(root_mem))
- return 1; /* indicate reclaim has succeeded */
- if (!root_mem->use_hierarchy)
- return ret;
-
- next_mem = mem_cgroup_get_next_node(root_mem);
-
- while (next_mem != root_mem) {
- if (mem_cgroup_is_obsolete(next_mem)) {
- next_mem = mem_cgroup_get_next_node(root_mem);
+ gfp_t gfp_mask, bool noswap, bool shrink)
+{
+ struct mem_cgroup *victim;
+ int ret, total = 0;
+ int loop = 0;
+
+ while (loop < 2) {
+ victim = mem_cgroup_select_victim(root_mem);
+ if (victim == root_mem)
+ loop++;
+ if (!mem_cgroup_local_usage(&victim->stat)) {
+ /* this cgroup's local usage == 0 */
+ css_put(&victim->css);
continue;
}
- ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
- get_swappiness(next_mem));
+ /* we use swappiness of local cgroup */
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+ get_swappiness(victim));
+ css_put(&victim->css);
+ /*
+ * When shrinking usage, we can't tell here whether we should stop or
+ * reclaim more; that depends on the caller. last_scanned_child is
+ * enough to keep reclaim fair across the tree.
+ */
+ if (shrink)
+ return ret;
+ total += ret;
if (mem_cgroup_check_under_limit(root_mem))
- return 1; /* indicate reclaim has succeeded */
- next_mem = mem_cgroup_get_next_node(root_mem);
+ return 1 + total;
}
- return ret;
+ return total;
}
bool mem_cgroup_oom_called(struct task_struct *task)
@@ -813,6 +885,19 @@ bool mem_cgroup_oom_called(struct task_struct *task)
rcu_read_unlock();
return ret;
}
+
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+ mem->last_oom_jiffies = jiffies;
+ return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+
/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked.
@@ -875,7 +960,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
goto nomem;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
- noswap);
+ noswap, false);
if (ret)
continue;
@@ -895,7 +980,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
mutex_unlock(&memcg_tasklist);
- mem_over_limit->last_oom_jiffies = jiffies;
+ record_last_oom(mem_over_limit);
}
goto nomem;
}
@@ -906,20 +991,55 @@ nomem:
return -ENOMEM;
}
+
+/*
+ * A helper function to get a mem_cgroup from an ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or similar if
+ * that is a concern (dropping the refcount from swap can be called against
+ * a removed memcg).
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ /* ID 0 is unused ID */
+ if (!id)
+ return NULL;
+ css = css_lookup(&mem_cgroup_subsys, id);
+ if (!css)
+ return NULL;
+ return container_of(css, struct mem_cgroup, css);
+}
+
static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
{
struct mem_cgroup *mem;
+ struct page_cgroup *pc;
+ unsigned short id;
swp_entry_t ent;
+ VM_BUG_ON(!PageLocked(page));
+
if (!PageSwapCache(page))
return NULL;
- ent.val = page_private(page);
- mem = lookup_swap_cgroup(ent);
- if (!mem)
- return NULL;
- if (!css_tryget(&mem->css))
- return NULL;
+ pc = lookup_page_cgroup(page);
+ /*
+ * Used bit of swapcache is solid under page lock.
+ */
+ if (PageCgroupUsed(pc)) {
+ mem = pc->mem_cgroup;
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ } else {
+ ent.val = page_private(page);
+ id = lookup_swap_cgroup(ent);
+ rcu_read_lock();
+ mem = mem_cgroup_lookup(id);
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ rcu_read_unlock();
+ }
return mem;
}
@@ -1118,6 +1238,10 @@ int mem_cgroup_newpage_charge(struct page *page,
MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype);
+
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
@@ -1154,16 +1278,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
unlock_page_cgroup(pc);
}
- if (do_swap_account && PageSwapCache(page)) {
- mem = try_get_mem_cgroup_from_swapcache(page);
- if (mem)
- mm = NULL;
- else
- mem = NULL;
- /* SwapCache may be still linked to LRU now. */
- mem_cgroup_lru_del_before_commit_swapcache(page);
- }
-
if (unlikely(!mm && !mem))
mm = &init_mm;
@@ -1171,22 +1285,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
- ret = mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (mem)
- css_put(&mem->css);
- if (PageSwapCache(page))
- mem_cgroup_lru_add_after_commit_swapcache(page);
+ /* shmem */
+ if (PageSwapCache(page)) {
+ ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+ if (!ret)
+ __mem_cgroup_commit_charge_swapin(page, mem,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM);
+ } else
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (do_swap_account && !ret && PageSwapCache(page)) {
- swp_entry_t ent = {.val = page_private(page)};
- /* avoid double counting */
- mem = swap_cgroup_record(ent, NULL);
- if (mem) {
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- mem_cgroup_put(mem);
- }
- }
return ret;
}
@@ -1229,7 +1337,9 @@ charge_cur_mm:
return __mem_cgroup_try_charge(mm, mask, ptr, true);
}
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype)
{
struct page_cgroup *pc;
@@ -1239,7 +1349,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
return;
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page);
- __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ __mem_cgroup_commit_charge(ptr, pc, ctype);
mem_cgroup_lru_add_after_commit_swapcache(page);
/*
* Now swap is on-memory. This means this page may be
@@ -1250,18 +1360,32 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
+ unsigned short id;
struct mem_cgroup *memcg;
- memcg = swap_cgroup_record(ent, NULL);
+
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * The recorded memcg may be an obsolete one, so avoid
+ * calling css_tryget()
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
-
+ rcu_read_unlock();
}
/* add this page(page_cgroup) to the LRU we want. */
}
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+ __mem_cgroup_commit_charge_swapin(page, ptr,
+ MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
if (mem_cgroup_disabled())
@@ -1324,8 +1448,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-
mem_cgroup_charge_statistics(mem, pc, false);
+
ClearPageCgroupUsed(pc);
/*
* pc->mem_cgroup is not cleared here. It will be accessed when it's
@@ -1377,7 +1501,7 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
/* record memcg information */
if (do_swap_account && memcg) {
- swap_cgroup_record(ent, memcg);
+ swap_cgroup_record(ent, css_id(&memcg->css));
mem_cgroup_get(memcg);
}
if (memcg)
@@ -1392,15 +1516,23 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
struct mem_cgroup *memcg;
+ unsigned short id;
if (!do_swap_account)
return;
- memcg = swap_cgroup_record(ent, NULL);
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * We uncharge this because the swap entry is freed.
+ * This memcg may be an obsolete one; we avoid calling css_tryget()
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
+ rcu_read_unlock();
}
#endif
@@ -1508,7 +1640,8 @@ int mem_cgroup_shrink_usage(struct page *page,
return 0;
do {
- progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+ progress = mem_cgroup_hierarchical_reclaim(mem,
+ gfp_mask, true, false);
progress += mem_cgroup_check_under_limit(mem);
} while (!progress && --retry);
@@ -1523,11 +1656,21 @@ static DEFINE_MUTEX(set_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
-
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
int progress;
u64 memswlimit;
int ret = 0;
+ int children = mem_cgroup_count_children(memcg);
+ u64 curusage, oldusage;
+
+ /*
+ * To keep hierarchical_reclaim simple, how long we should retry
+ * depends on the caller. We set our retry count to be a function
+ * of the number of children we should visit in this loop.
+ */
+ retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+ oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
@@ -1553,8 +1696,13 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
- false);
- if (!progress) retry_count--;
+ false, true);
+ curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ /* Usage is reduced ? */
+ if (curusage >= oldusage)
+ retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
@@ -1563,13 +1711,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
u64 memlimit, oldusage, curusage;
- int ret;
+ int children = mem_cgroup_count_children(memcg);
+ int ret = -EBUSY;
if (!do_swap_account)
return -EINVAL;
-
+ /* see mem_cgroup_resize_limit */
+ retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+ oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
@@ -1593,11 +1744,13 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
- mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+ mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ /* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
}
@@ -1893,54 +2046,90 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}
-static const struct mem_cgroup_stat_desc {
- const char *msg;
- u64 unit;
-} mem_cgroup_stat_desc[] = {
- [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
- [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
- [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
- [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
+
+/* For read statistics */
+enum {
+ MCS_CACHE,
+ MCS_RSS,
+ MCS_PGPGIN,
+ MCS_PGPGOUT,
+ MCS_INACTIVE_ANON,
+ MCS_ACTIVE_ANON,
+ MCS_INACTIVE_FILE,
+ MCS_ACTIVE_FILE,
+ MCS_UNEVICTABLE,
+ NR_MCS_STAT,
+};
+
+struct mcs_total_stat {
+ s64 stat[NR_MCS_STAT];
+};
+
+struct {
+ char *local_name;
+ char *total_name;
+} memcg_stat_strings[NR_MCS_STAT] = {
+ {"cache", "total_cache"},
+ {"rss", "total_rss"},
+ {"pgpgin", "total_pgpgin"},
+ {"pgpgout", "total_pgpgout"},
+ {"inactive_anon", "total_inactive_anon"},
+ {"active_anon", "total_active_anon"},
+ {"inactive_file", "total_inactive_file"},
+ {"active_file", "total_active_file"},
+ {"unevictable", "total_unevictable"}
};
+
+static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+{
+ struct mcs_total_stat *s = data;
+ s64 val;
+
+ /* per cpu stat */
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+ s->stat[MCS_CACHE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+ s->stat[MCS_RSS] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+ s->stat[MCS_PGPGIN] += val;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+ s->stat[MCS_PGPGOUT] += val;
+
+ /* per zone stat */
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
+ s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
+ s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
+ s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
+ s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
+ s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
+ return 0;
+}
+
+static void
+mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
+{
+ mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+}
+
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
- struct mem_cgroup_stat *stat = &mem_cont->stat;
+ struct mcs_total_stat mystat;
int i;
- for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
- s64 val;
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_local_stat(mem_cont, &mystat);
- val = mem_cgroup_read_stat(stat, i);
- val *= mem_cgroup_stat_desc[i].unit;
- cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
- }
- /* showing # of active pages */
- {
- unsigned long active_anon, inactive_anon;
- unsigned long active_file, inactive_file;
- unsigned long unevictable;
-
- inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_ANON);
- active_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_ANON);
- inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_FILE);
- active_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_FILE);
- unevictable = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_UNEVICTABLE);
-
- cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
- cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
- cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
- cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
- cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
- }
+ /* Hierarchical information */
{
unsigned long long limit, memsw_limit;
memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
@@ -1949,6 +2138,12 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
}
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_total_stat(mem_cont, &mystat);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+
+
#ifdef CONFIG_DEBUG_VM
cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2178,6 +2373,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
{
int node;
+ free_css_id(&mem_cgroup_subsys, &mem->css);
+
for_each_node_state(node, N_POSSIBLE)
free_mem_cgroup_per_zone_info(mem, node);
@@ -2228,11 +2425,12 @@ static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem, *parent;
+ long error = -ENOMEM;
int node;
mem = mem_cgroup_alloc();
if (!mem)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
@@ -2260,7 +2458,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
res_counter_init(&mem->res, NULL);
res_counter_init(&mem->memsw, NULL);
}
- mem->last_scanned_child = NULL;
+ mem->last_scanned_child = 0;
spin_lock_init(&mem->reclaim_param_lock);
if (parent)
@@ -2269,26 +2467,22 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &mem->css;
free_out:
__mem_cgroup_free(mem);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
-static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- mem_cgroup_force_empty(mem, false);
+
+ return mem_cgroup_force_empty(mem, false);
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
- if (last_scanned_child) {
- VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
- mem_cgroup_put(last_scanned_child);
- }
mem_cgroup_put(mem);
}
@@ -2327,6 +2521,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
.populate = mem_cgroup_populate,
.attach = mem_cgroup_move_task,
.early_init = 0,
+ .use_id = 1,
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/mm/memory.c b/mm/memory.c
index baa999e87cd2..cf6873e91c6a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,6 +1151,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
+ /*
+ * pte_mkyoung() would be more correct here, but atomic care
+ * is needed to avoid losing the dirty bit: it is easier to use
+ * mark_page_accessed().
+ */
mark_page_accessed(page);
}
unlock:
@@ -1665,9 +1670,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
*/
- if (addr == vma->vm_start && end == vma->vm_end)
+ if (addr == vma->vm_start && end == vma->vm_end) {
vma->vm_pgoff = pfn;
- else if (is_cow_mapping(vma->vm_flags))
+ vma->vm_flags |= VM_PFN_AT_MMAP;
+ } else if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1685,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* needed from higher level routine calling unmap_vmas
*/
vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags &= ~VM_PFN_AT_MMAP;
return -EINVAL;
}
@@ -1938,6 +1945,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+ struct vm_fault vmf;
+ int tmp;
+
+ vmf.virtual_address = (void __user *)(address &
+ PAGE_MASK);
+ vmf.pgoff = old_page->index;
+ vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+ vmf.page = old_page;
+
/*
* Notify the address space that the page is about to
* become writable so that it can prohibit this or wait
@@ -1949,8 +1965,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
- if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+ tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+ if (unlikely(tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+ ret = tmp;
goto unwritable_page;
+ }
/*
* Since we dropped the lock we need to revalidate
@@ -2099,7 +2119,7 @@ oom:
unwritable_page:
page_cache_release(old_page);
- return VM_FAULT_SIGBUS;
+ return ret;
}
/*
@@ -2433,8 +2453,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(PGMAJFAULT);
}
- mark_page_accessed(page);
-
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2643,9 +2661,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* to become writable
*/
if (vma->vm_ops->page_mkwrite) {
+ int tmp;
+
unlock_page(page);
- if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
- ret = VM_FAULT_SIGBUS;
+ vmf.flags |= FAULT_FLAG_MKWRITE;
+ tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+ if (unlikely(tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+ ret = tmp;
anon = 1; /* no anon but release vmf.page */
goto out_unlocked;
}
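The memory.c hunks change the ->page_mkwrite() calling convention: the callback now receives a struct vm_fault and signals trouble with VM_FAULT_* bits instead of a bare negative value, which do_wp_page()/__do_fault() then propagate. A hedged sketch of what an implementation looks like under the new convention; my_fs_reserve_block() is a made-up stub and the locking shown is just one common pattern, not code from this patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* stub standing in for a filesystem's real block-reservation logic */
static int my_fs_reserve_block(struct inode *inode, struct page *page)
{
	return 0;
}

static int my_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		/* truncated under us: ask the fault code to retry */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	if (my_fs_reserve_block(inode, page) < 0) {
		unlock_page(page);
		return VM_FAULT_SIGBUS;	/* e.g. out of space */
	}

	unlock_page(page);
	return 0;	/* the page may now be made writable */
}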
diff --git a/mm/migrate.c b/mm/migrate.c
index a9eff3f092f6..068655d8f883 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -250,7 +250,7 @@ out:
* The number of remaining references must be:
* 1 for anonymous pages without a mapping
* 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate set.
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
*/
static int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page)
@@ -270,7 +270,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + !!PagePrivate(page);
+ expected_count = 2 + !!page_has_private(page);
if (page_count(page) != expected_count ||
(struct page *)radix_tree_deref_slot(pslot) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -386,7 +386,7 @@ EXPORT_SYMBOL(fail_migrate_page);
/*
* Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
+ * pages that do not use PagePrivate/PagePrivate2.
*
* Pages are locked upon entry and exit.
*/
@@ -522,7 +522,7 @@ static int fallback_migrate_page(struct address_space *mapping,
* Buffers may be managed in a filesystem specific way.
* We must have no buffers or drop them.
*/
- if (PagePrivate(page) &&
+ if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
@@ -655,7 +655,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* free the metadata, so the page can be freed.
*/
if (!page->mapping) {
- if (!PageAnon(page) && PagePrivate(page)) {
+ if (!PageAnon(page) && page_has_private(page)) {
/*
* Go direct to try_to_free_buffers() here because
* a) that's what try_to_release_page() would do anyway
diff --git a/mm/mmap.c b/mm/mmap.c
index 00ced3ee49a8..4a3841186c11 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
+#include <linux/ima.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
@@ -1049,6 +1050,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
if (error)
return error;
+ error = ima_file_mmap(file, prot);
+ if (error)
+ return error;
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
@@ -2477,7 +2481,4 @@ void mm_drop_all_locks(struct mm_struct *mm)
*/
void __init mmap_init(void)
{
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL);
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fcf47d449b4..72eda4aee2cb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,7 +69,7 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;
-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);
@@ -463,12 +463,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
*/
void __init mmap_init(void)
{
- vm_region_jar = kmem_cache_create("vm_region_jar",
- sizeof(struct vm_region), 0,
- SLAB_PANIC, NULL);
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL);
+ vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}
/*
@@ -486,27 +481,24 @@ static noinline void validate_nommu_regions(void)
return;
last = rb_entry(lastp, struct vm_region, vm_rb);
- if (unlikely(last->vm_end <= last->vm_start))
- BUG();
- if (unlikely(last->vm_top < last->vm_end))
- BUG();
+ BUG_ON(unlikely(last->vm_end <= last->vm_start));
+ BUG_ON(unlikely(last->vm_top < last->vm_end));
while ((p = rb_next(lastp))) {
region = rb_entry(p, struct vm_region, vm_rb);
last = rb_entry(lastp, struct vm_region, vm_rb);
- if (unlikely(region->vm_end <= region->vm_start))
- BUG();
- if (unlikely(region->vm_top < region->vm_end))
- BUG();
- if (unlikely(region->vm_start < last->vm_top))
- BUG();
+ BUG_ON(unlikely(region->vm_end <= region->vm_start));
+ BUG_ON(unlikely(region->vm_top < region->vm_end));
+ BUG_ON(unlikely(region->vm_start < last->vm_top));
lastp = p;
}
}
#else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
#endif
/*
@@ -563,16 +555,17 @@ static void free_page_series(unsigned long from, unsigned long to)
struct page *page = virt_to_page(from);
kdebug("- free %lx", from);
- atomic_dec(&mmap_pages_allocated);
+ atomic_long_dec(&mmap_pages_allocated);
if (page_count(page) != 1)
- kdebug("free page %p [%d]", page, page_count(page));
+ kdebug("free page %p: refcount not one: %d",
+ page, page_count(page));
put_page(page);
}
}
/*
* release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
* - the region may not have been added to the tree yet, in which case vm_top
* will equal vm_start
*/
@@ -1096,7 +1089,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
goto enomem;
total = 1 << order;
- atomic_add(total, &mmap_pages_allocated);
+ atomic_long_add(total, &mmap_pages_allocated);
point = rlen >> PAGE_SHIFT;
@@ -1107,7 +1100,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
order = ilog2(total - point);
n = 1 << order;
kdebug("shave %lu/%lu @%lu", n, total - point, total);
- atomic_sub(n, &mmap_pages_allocated);
+ atomic_long_sub(n, &mmap_pages_allocated);
total -= n;
set_page_refcounted(pages + total);
__free_pages(pages + total, order);
@@ -1536,10 +1529,15 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* find the first potentially overlapping VMA */
vma = find_vma(mm, start);
if (!vma) {
- printk(KERN_WARNING
- "munmap of memory not mmapped by process %d (%s):"
- " 0x%lx-0x%lx\n",
- current->pid, current->comm, start, start + len - 1);
+ static int limit = 0;
+ if (limit < 5) {
+ printk(KERN_WARNING
+ "munmap of memory not mmapped by process %d"
+ " (%s): 0x%lx-0x%lx\n",
+ current->pid, current->comm,
+ start, start + len - 1);
+ limit++;
+ }
return -EINVAL;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 40ba05061a4f..2f3166e308d9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -55,7 +55,7 @@ static DEFINE_SPINLOCK(zone_scan_lock);
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
- unsigned long points, cpu_time, run_time, s;
+ unsigned long points, cpu_time, run_time;
struct mm_struct *mm;
struct task_struct *child;
@@ -110,12 +110,10 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
else
run_time = 0;
- s = int_sqrt(cpu_time);
- if (s)
- points /= s;
- s = int_sqrt(int_sqrt(run_time));
- if (s)
- points /= s;
+ if (cpu_time)
+ points /= int_sqrt(cpu_time);
+ if (run_time)
+ points /= int_sqrt(int_sqrt(run_time));
/*
* Niced processes are most likely less important, so double
@@ -396,6 +394,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
+ mem_cgroup_print_oom_info(mem, current);
show_mem();
if (sysctl_oom_dump_tasks)
dump_tasks(mem);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 74dc57c74349..30351f0063ac 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
/*
* Start background writeback (via pdflush) at this percentage
*/
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
@@ -92,14 +92,14 @@ int vm_dirty_ratio = 10;
unsigned long vm_dirty_bytes;
/*
- * The interval between `kupdate'-style writebacks, in jiffies
+ * The interval between `kupdate'-style writebacks
*/
-int dirty_writeback_interval = 5 * HZ;
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
/*
- * The longest number of jiffies for which data is allowed to remain dirty
+ * The longest time for which data is allowed to remain dirty
*/
-int dirty_expire_interval = 30 * HZ;
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,9 +770,9 @@ static void wb_kupdate(unsigned long arg)
sync_supers();
- oldest_jif = jiffies - dirty_expire_interval;
+ oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
start_jif = jiffies;
- next_jif = start_jif + dirty_writeback_interval;
+ next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
nr_to_write = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS) +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
@@ -801,9 +801,10 @@ static void wb_kupdate(unsigned long arg)
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length, ppos);
if (dirty_writeback_interval)
- mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+ mod_timer(&wb_timer, jiffies +
+ msecs_to_jiffies(dirty_writeback_interval * 10));
else
del_timer(&wb_timer);
return 0;
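With the interval now kept in centiseconds rather than jiffies, each user converts at the point of use; the arithmetic, using the default value and HZ == 250 purely for illustration:

/*
 * dirty_writeback_interval = 5 * 100 = 500 centiseconds
 * 500 centiseconds * 10    = 5000 milliseconds
 * msecs_to_jiffies(5000)   = 5000 * 250 / 1000 = 1250 jiffies (HZ == 250)
 */
unsigned long timeout = msecs_to_jiffies(dirty_writeback_interval * 10);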
@@ -905,7 +906,8 @@ void __init page_writeback_init(void)
{
int shift;
- mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+ mod_timer(&wb_timer,
+ jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
@@ -1198,6 +1200,20 @@ int __set_page_dirty_no_writeback(struct page *page)
}
/*
+ * Helper function for set_page_dirty family.
+ * NOTE: This relies on being atomic wrt interrupts.
+ */
+void account_page_dirtied(struct page *page, struct address_space *mapping)
+{
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ task_dirty_inc(current);
+ task_io_account_write(PAGE_CACHE_SIZE);
+ }
+}
+
+/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
@@ -1226,13 +1242,7 @@ int __set_page_dirty_nobuffers(struct page *page)
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- if (mapping_cap_account_dirty(mapping)) {
- __inc_zone_page_state(page, NR_FILE_DIRTY);
- __inc_bdi_stat(mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- task_dirty_inc(current);
- task_io_account_write(PAGE_CACHE_SIZE);
- }
+ account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..3f30189896fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -331,7 +331,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- if (unlikely(!PageTail(p) | (p->first_page != page))) {
+ if (unlikely(!PageTail(p) || (p->first_page != page))) {
bad_page(page);
bad++;
}
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
unsigned long flags;
struct zone *zone;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- if (!populated_zone(zone))
- continue;
-
pset = zone_pcp(zone, cpu);
pcp = &pset->pcp;
@@ -1479,6 +1476,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
unsigned long did_some_progress;
unsigned long pages_reclaimed = 0;
+ lockdep_trace_alloc(gfp_mask);
+
might_sleep_if(wait);
if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1577,16 @@ nofail_alloc:
*/
cpuset_update_task_memory_state();
p->flags |= PF_MEMALLOC;
+
+ lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist, order,
+ gfp_mask, nodemask);
p->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
p->flags &= ~PF_MEMALLOC;
cond_resched();
@@ -1874,10 +1877,7 @@ void show_free_areas(void)
int cpu;
struct zone *zone;
- for_each_zone(zone) {
- if (!populated_zone(zone))
- continue;
-
+ for_each_populated_zone(zone) {
show_node(zone);
printk("%s per-cpu:\n", zone->name);
@@ -1917,12 +1917,9 @@ void show_free_areas(void)
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
int i;
- if (!populated_zone(zone))
- continue;
-
show_node(zone);
printk("%s"
" free:%lukB"
@@ -1962,12 +1959,9 @@ void show_free_areas(void)
printk("\n");
}
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
unsigned long nr[MAX_ORDER], flags, order, total = 0;
- if (!populated_zone(zone))
- continue;
-
show_node(zone);
printk("%s: ", zone->name);
@@ -2779,11 +2773,7 @@ static int __cpuinit process_zones(int cpu)
node_set_state(node, N_CPU); /* this node has a cpu */
- for_each_zone(zone) {
-
- if (!populated_zone(zone))
- continue;
-
+ for_each_populated_zone(zone) {
zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
GFP_KERNEL, node);
if (!zone_pcp(zone, cpu))
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index ceecfbb143fa..791905c991df 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -285,12 +285,8 @@ struct swap_cgroup_ctrl {
struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
-/*
- * This 8bytes seems big..maybe we can reduce this when we can use "id" for
- * cgroup rather than pointer.
- */
struct swap_cgroup {
- struct mem_cgroup *val;
+ unsigned short id;
};
#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK (SC_PER_PAGE - 1)
@@ -342,10 +338,10 @@ not_enough_page:
* @ent: swap entry to be recorded into
* @mem: mem_cgroup to be recorded
*
- * Returns old value at success, NULL at failure.
- * (Of course, old value can be NULL.)
+ * Returns the old value on success, 0 on failure.
+ * (Of course, the old value can be 0.)
*/
-struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
int type = swp_type(ent);
unsigned long offset = swp_offset(ent);
@@ -354,18 +350,18 @@ struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
struct swap_cgroup *sc;
- struct mem_cgroup *old;
+ unsigned short old;
if (!do_swap_account)
- return NULL;
+ return 0;
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
sc = page_address(mappage);
sc += pos;
- old = sc->val;
- sc->val = mem;
+ old = sc->id;
+ sc->id = id;
return old;
}
@@ -374,9 +370,9 @@ struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
* lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
* @ent: swap entry to be looked up.
*
- * Returns pointer to mem_cgroup at success. NULL at failure.
+ * Returns the CSS ID of the mem_cgroup on success, 0 on failure (0 is an invalid ID).
*/
-struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
int type = swp_type(ent);
unsigned long offset = swp_offset(ent);
@@ -385,16 +381,16 @@ struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
struct swap_cgroup *sc;
- struct mem_cgroup *ret;
+ unsigned short ret;
if (!do_swap_account)
- return NULL;
+ return 0;
ctrl = &swap_cgroup_ctrl[type];
mappage = ctrl->map[idx];
sc = page_address(mappage);
sc += pos;
- ret = sc->val;
+ ret = sc->id;
return ret;
}
@@ -430,13 +426,6 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
}
mutex_unlock(&swap_cgroup_mutex);
- printk(KERN_INFO
- "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
- " and %ld bytes to hold mem_cgroup pointers on swap\n",
- array_size, length * PAGE_SIZE);
- printk(KERN_INFO
- "swap_cgroup can be disabled by noswapaccount boot option.\n");
-
return 0;
nomem:
printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 15de509b68fd..118905e3d788 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -191,7 +191,7 @@ static int pdflush(void *dummy)
/*
* Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+ * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
* Our needs are more modest - cut back to our cpusets cpus_allowed.
* This is needed as pdflush's are dynamically created and destroyed.
* The boottime pdflush's are easily placed w/o these 2 lines.
diff --git a/mm/percpu.c b/mm/percpu.c
new file mode 100644
index 000000000000..1aa5d8fbca12
--- /dev/null
+++ b/mm/percpu.c
@@ -0,0 +1,1326 @@
+/*
+ * linux/mm/percpu.c - percpu memory allocator
+ *
+ * Copyright (C) 2009 SUSE Linux Products GmbH
+ * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * This is a percpu allocator which can handle both static and dynamic
+ * areas. Percpu areas are allocated in chunks in vmalloc area. Each
+ * chunk consists of num_possible_cpus() units and the first chunk
+ * is used for static percpu variables in the kernel image (special
+ * boot time alloc/init handling necessary as these areas need to be
+ * brought up before allocation services are running). Units grow as
+ * necessary and all units grow or shrink in unison. When a chunk is
+ * filled up, another chunk is allocated, i.e. in the vmalloc area:
+ *
+ * c0 c1 c2
+ * ------------------- ------------------- ------------
+ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
+ * ------------------- ...... ------------------- .... ------------
+ *
+ * Allocation is done in offset-size areas of single unit space. I.e.,
+ * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
+ * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
+ * percpu base registers UNIT_SIZE apart.
+ *
+ * There are usually many small percpu allocations, many of them as
+ * small as 4 bytes. The allocator organizes chunks into lists
+ * according to free size and tries to allocate from the fullest one.
+ * Each chunk keeps the maximum contiguous area size hint which is
+ * guaranteed to be equal to or larger than the maximum contiguous
+ * area in the chunk. This helps the allocator not to iterate the
+ * chunk maps unnecessarily.
+ *
+ * Allocation state in each chunk is kept using an array of integers
+ * on chunk->map. A positive value in the map represents a free
+ * region and negative allocated. Allocation inside a chunk is done
+ * by scanning this map sequentially and serving the first matching
+ * entry. This is mostly copied from the percpu_modalloc() allocator.
+ * Chunks are also linked into a rb tree to ease address to chunk
+ * mapping during free.
+ *
+ * To use this allocator, arch code should do the following:
+ *
+ * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ *
+ * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
+ * regular address to percpu pointer and back if they need to be
+ * different from the default
+ *
+ * - use pcpu_setup_first_chunk() during percpu area initialization to
+ * setup the first chunk containing the kernel static percpu area
+ */
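From a caller's point of view the new allocator keeps the familiar dynamic percpu API; a self-contained sketch of how a user might allocate, touch and free per-cpu data (my_stats/my_counters_init are hypothetical names, not part of this file):

#include <linux/percpu.h>

struct my_stats {
	unsigned long hits;
	unsigned long misses;
};

static struct my_stats *my_stats_pcpu;	/* percpu pointer */

static int __init my_counters_init(void)
{
	int cpu;

	/* returns zeroed memory, one copy per possible cpu */
	my_stats_pcpu = __alloc_percpu(sizeof(struct my_stats),
				       __alignof__(struct my_stats));
	if (!my_stats_pcpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(my_stats_pcpu, cpu)->hits = 0; /* redundant, shown for illustration */

	return 0;
}

static void my_counters_exit(void)
{
	free_percpu(my_stats_pcpu);
}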
+
+#include <linux/bitmap.h>
+#include <linux/bootmem.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/pfn.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
+#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
+
+/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
+#ifndef __addr_to_pcpu_ptr
+#define __addr_to_pcpu_ptr(addr) \
+ (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
+ + (unsigned long)__per_cpu_start)
+#endif
+#ifndef __pcpu_ptr_to_addr
+#define __pcpu_ptr_to_addr(ptr) \
+ (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
+ - (unsigned long)__per_cpu_start)
+#endif
+
+struct pcpu_chunk {
+ struct list_head list; /* linked to pcpu_slot lists */
+ struct rb_node rb_node; /* key is chunk->vm->addr */
+ int free_size; /* free bytes in the chunk */
+ int contig_hint; /* max contiguous size hint */
+ struct vm_struct *vm; /* mapped vmalloc region */
+ int map_used; /* # of map entries used */
+ int map_alloc; /* # of map entries allocated */
+ int *map; /* allocation map */
+ bool immutable; /* no [de]population allowed */
+ struct page **page; /* points to page array */
+ struct page *page_ar[]; /* #cpus * UNIT_PAGES */
+};
+
+static int pcpu_unit_pages __read_mostly;
+static int pcpu_unit_size __read_mostly;
+static int pcpu_chunk_size __read_mostly;
+static int pcpu_nr_slots __read_mostly;
+static size_t pcpu_chunk_struct_size __read_mostly;
+
+/* the address of the first chunk which starts with the kernel static area */
+void *pcpu_base_addr __read_mostly;
+EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+/* optional reserved chunk, only accessible for reserved allocations */
+static struct pcpu_chunk *pcpu_reserved_chunk;
+/* offset limit of the reserved chunk */
+static int pcpu_reserved_chunk_limit;
+
+/*
+ * Synchronization rules.
+ *
+ * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
+ * protects allocation/reclaim paths, chunks and chunk->page arrays.
+ * The latter is a spinlock and protects the index data structures -
+ * chunk slots, rbtree, chunks and area maps in chunks.
+ *
+ * During allocation, pcpu_alloc_mutex is kept locked all the time and
+ * pcpu_lock is grabbed and released as necessary. All actual memory
+ * allocations are done using GFP_KERNEL with pcpu_lock released.
+ *
+ * Free path accesses and alters only the index data structures, so it
+ * can be safely called from atomic context. When memory needs to be
+ * returned to the system, free path schedules reclaim_work which
+ * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
+ * reclaimed, release both locks and frees the chunks. Note that it's
+ * necessary to grab both locks to remove a chunk from circulation as
+ * allocation path might be referencing the chunk with only
+ * pcpu_alloc_mutex locked.
+ */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
+static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
+
+static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
+
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
+static int __pcpu_size_to_slot(int size)
+{
+ int highbit = fls(size); /* size is in bytes */
+ return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
+}
+
+static int pcpu_size_to_slot(int size)
+{
+ if (size == pcpu_unit_size)
+ return pcpu_nr_slots - 1;
+ return __pcpu_size_to_slot(size);
+}
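A quick worked example of the slot mapping defined above (numbers purely for illustration):

/*
 * With PCPU_SLOT_BASE_SHIFT == 5, a chunk with 1024 bytes free gives
 * fls(1024) == 11, so it lands in slot max(11 - 5 + 2, 1) == 8.
 * A chunk with less than sizeof(int) bytes free is parked in slot 0
 * (see pcpu_chunk_slot() below), and a completely free chunk
 * (free_size == pcpu_unit_size) always goes to the last slot.
 */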
+
+static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
+{
+ if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
+ return 0;
+
+ return pcpu_size_to_slot(chunk->free_size);
+}
+
+static int pcpu_page_idx(unsigned int cpu, int page_idx)
+{
+ return cpu * pcpu_unit_pages + page_idx;
+}
+
+static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
+{
+ return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+}
+
+static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
+{
+ return (unsigned long)chunk->vm->addr +
+ (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+}
+
+static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
+ int page_idx)
+{
+ return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+}
+
+/**
+ * pcpu_mem_alloc - allocate memory
+ * @size: bytes to allocate
+ *
+ * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
+ * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * memory is always zeroed.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_mem_alloc(size_t size)
+{
+ if (size <= PAGE_SIZE)
+ return kzalloc(size, GFP_KERNEL);
+ else {
+ void *ptr = vmalloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+ }
+}
+
+/**
+ * pcpu_mem_free - free memory
+ * @ptr: memory to free
+ * @size: size of the area
+ *
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ */
+static void pcpu_mem_free(void *ptr, size_t size)
+{
+ if (size <= PAGE_SIZE)
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+/**
+ * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
+ * @chunk: chunk of interest
+ * @oslot: the previous slot it was on
+ *
+ * This function is called after an allocation or free changed @chunk.
+ * New slot according to the changed state is determined and @chunk is
+ * moved to the slot. Note that the reserved chunk is never put on
+ * chunk slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
+{
+ int nslot = pcpu_chunk_slot(chunk);
+
+ if (chunk != pcpu_reserved_chunk && oslot != nslot) {
+ if (oslot < nslot)
+ list_move(&chunk->list, &pcpu_slot[nslot]);
+ else
+ list_move_tail(&chunk->list, &pcpu_slot[nslot]);
+ }
+}
+
+static struct rb_node **pcpu_chunk_rb_search(void *addr,
+ struct rb_node **parentp)
+{
+ struct rb_node **p = &pcpu_addr_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct pcpu_chunk *chunk;
+
+ while (*p) {
+ parent = *p;
+ chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
+
+ if (addr < chunk->vm->addr)
+ p = &(*p)->rb_left;
+ else if (addr > chunk->vm->addr)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+
+ if (parentp)
+ *parentp = parent;
+ return p;
+}
+
+/**
+ * pcpu_chunk_addr_search - search for chunk containing specified address
+ * @addr: address to search for
+ *
+ * Look for the chunk which might contain @addr. More specifically, it
+ * searches for the chunk with the highest start address which isn't
+ * beyond @addr.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * The address of the found chunk.
+ */
+static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
+{
+ struct rb_node *n, *parent;
+ struct pcpu_chunk *chunk;
+
+ /* is it in the reserved chunk? */
+ if (pcpu_reserved_chunk) {
+ void *start = pcpu_reserved_chunk->vm->addr;
+
+ if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
+ return pcpu_reserved_chunk;
+ }
+
+ /* nah... search the regular ones */
+ n = *pcpu_chunk_rb_search(addr, &parent);
+ if (!n) {
+ /* no exactly matching chunk, the parent is the closest */
+ n = parent;
+ BUG_ON(!n);
+ }
+ chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+
+ if (addr < chunk->vm->addr) {
+ /* the parent was the next one, look for the previous one */
+ n = rb_prev(n);
+ BUG_ON(!n);
+ chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+ }
+
+ return chunk;
+}
+
+/**
+ * pcpu_chunk_addr_insert - insert chunk into address rb tree
+ * @new: chunk to insert
+ *
+ * Insert @new into address rb tree.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
+{
+ struct rb_node **p, *parent;
+
+ p = pcpu_chunk_rb_search(new->vm->addr, &parent);
+ BUG_ON(*p);
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, &pcpu_addr_root);
+}
+
+/**
+ * pcpu_extend_area_map - extend area map for allocation
+ * @chunk: target chunk
+ *
+ * Extend the area map of @chunk so that it can accommodate an allocation.
+ * A single allocation can split an area into three areas, so this
+ * function makes sure that @chunk->map has at least two extra slots.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
+ * if area map is extended.
+ *
+ * RETURNS:
+ * 0 if noop, 1 if successfully extended, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+{
+ int new_alloc;
+ int *new;
+ size_t size;
+
+ /* has enough? */
+ if (chunk->map_alloc >= chunk->map_used + 2)
+ return 0;
+
+ spin_unlock_irq(&pcpu_lock);
+
+ new_alloc = PCPU_DFL_MAP_ALLOC;
+ while (new_alloc < chunk->map_used + 2)
+ new_alloc *= 2;
+
+ new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
+ if (!new) {
+ spin_lock_irq(&pcpu_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * Acquire pcpu_lock and switch to the new area map. Only a free
+ * could have happened in between, so map_used couldn't have
+ * grown.
+ */
+ spin_lock_irq(&pcpu_lock);
+ BUG_ON(new_alloc < chunk->map_used + 2);
+
+ size = chunk->map_alloc * sizeof(chunk->map[0]);
+ memcpy(new, chunk->map, size);
+
+ /*
+ * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
+ * one of the first chunks and still using static map.
+ */
+ if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
+ pcpu_mem_free(chunk->map, size);
+
+ chunk->map_alloc = new_alloc;
+ chunk->map = new;
+ return 0;
+}
+
+/**
+ * pcpu_split_block - split a map block
+ * @chunk: chunk of interest
+ * @i: index of map block to split
+ * @head: head size in bytes (can be 0)
+ * @tail: tail size in bytes (can be 0)
+ *
+ * Split the @i'th map block into two or three blocks. If @head is
+ * non-zero, @head bytes block is inserted before block @i moving it
+ * to @i+1 and reducing its size by @head bytes.
+ *
+ * If @tail is non-zero, the target block, which can be @i or @i+1
+ * depending on @head, is reduced by @tail bytes and @tail byte block
+ * is inserted after the target block.
+ *
+ * @chunk->map must have enough free slots to accommodate the split.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
+ int head, int tail)
+{
+ int nr_extra = !!head + !!tail;
+
+ BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
+
+ /* insert new subblocks */
+ memmove(&chunk->map[i + nr_extra], &chunk->map[i],
+ sizeof(chunk->map[0]) * (chunk->map_used - i));
+ chunk->map_used += nr_extra;
+
+ if (head) {
+ chunk->map[i + 1] = chunk->map[i] - head;
+ chunk->map[i++] = head;
+ }
+ if (tail) {
+ chunk->map[i++] -= tail;
+ chunk->map[i] = tail;
+ }
+}
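To make the head/tail bookkeeping concrete, here is what a single call does to a toy map (illustration, not part of the patch):

/*
 * map = { 512 }, map_used = 1
 * pcpu_split_block(chunk, 0, 64, 192);
 * map = { 64, 256, 192 }, map_used = 3
 *
 * The middle entry is the 256-byte area the caller is about to hand
 * out; pcpu_alloc_area() then negates it to mark it allocated.
 */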
+
+/**
+ * pcpu_alloc_area - allocate area from a pcpu_chunk
+ * @chunk: chunk of interest
+ * @size: wanted size in bytes
+ * @align: wanted align
+ *
+ * Try to allocate @size bytes area aligned at @align from @chunk.
+ * Note that this function only allocates the offset. It doesn't
+ * populate or map the area.
+ *
+ * @chunk->map must have at least two free slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * Allocated offset in @chunk on success, -1 if no matching area is
+ * found.
+ */
+static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
+{
+ int oslot = pcpu_chunk_slot(chunk);
+ int max_contig = 0;
+ int i, off;
+
+ for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
+ bool is_last = i + 1 == chunk->map_used;
+ int head, tail;
+
+ /* extra for alignment requirement */
+ head = ALIGN(off, align) - off;
+ BUG_ON(i == 0 && head != 0);
+
+ if (chunk->map[i] < 0)
+ continue;
+ if (chunk->map[i] < head + size) {
+ max_contig = max(chunk->map[i], max_contig);
+ continue;
+ }
+
+ /*
+ * If head is small or the previous block is free,
+ * merge them. Note that 'small' is defined as smaller
+ * than sizeof(int), which is very small but isn't too
+ * uncommon for percpu allocations.
+ */
+ if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
+ if (chunk->map[i - 1] > 0)
+ chunk->map[i - 1] += head;
+ else {
+ chunk->map[i - 1] -= head;
+ chunk->free_size -= head;
+ }
+ chunk->map[i] -= head;
+ off += head;
+ head = 0;
+ }
+
+ /* if tail is small, just keep it around */
+ tail = chunk->map[i] - head - size;
+ if (tail < sizeof(int))
+ tail = 0;
+
+ /* split if warranted */
+ if (head || tail) {
+ pcpu_split_block(chunk, i, head, tail);
+ if (head) {
+ i++;
+ off += head;
+ max_contig = max(chunk->map[i - 1], max_contig);
+ }
+ if (tail)
+ max_contig = max(chunk->map[i + 1], max_contig);
+ }
+
+ /* update hint and mark allocated */
+ if (is_last)
+ chunk->contig_hint = max_contig; /* fully scanned */
+ else
+ chunk->contig_hint = max(chunk->contig_hint,
+ max_contig);
+
+ chunk->free_size -= chunk->map[i];
+ chunk->map[i] = -chunk->map[i];
+
+ pcpu_chunk_relocate(chunk, oslot);
+ return off;
+ }
+
+ chunk->contig_hint = max_contig; /* fully scanned */
+ pcpu_chunk_relocate(chunk, oslot);
+
+ /* tell the upper layer that this chunk has no matching area */
+ return -1;
+}
+
+/**
+ * pcpu_free_area - free area to a pcpu_chunk
+ * @chunk: chunk of interest
+ * @freeme: offset of area to free
+ *
+ * Free the area starting at @freeme in @chunk. Note that this function
+ * only modifies the allocation map. It doesn't depopulate or unmap
+ * the area.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
+{
+ int oslot = pcpu_chunk_slot(chunk);
+ int i, off;
+
+ for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
+ if (off == freeme)
+ break;
+ BUG_ON(off != freeme);
+ BUG_ON(chunk->map[i] > 0);
+
+ chunk->map[i] = -chunk->map[i];
+ chunk->free_size += chunk->map[i];
+
+ /* merge with previous? */
+ if (i > 0 && chunk->map[i - 1] >= 0) {
+ chunk->map[i - 1] += chunk->map[i];
+ chunk->map_used--;
+ memmove(&chunk->map[i], &chunk->map[i + 1],
+ (chunk->map_used - i) * sizeof(chunk->map[0]));
+ i--;
+ }
+ /* merge with next? */
+ if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
+ chunk->map[i] += chunk->map[i + 1];
+ chunk->map_used--;
+ memmove(&chunk->map[i + 1], &chunk->map[i + 2],
+ (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
+ }
+
+ chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
+ pcpu_chunk_relocate(chunk, oslot);
+}
+
+/**
+ * pcpu_unmap - unmap pages out of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to unmap
+ * @page_end: page index of the last page to unmap + 1
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
+ * If @flush is true, vcache is flushed before unmapping and tlb
+ * after.
+ */
+static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
+ bool flush)
+{
+ unsigned int last = num_possible_cpus() - 1;
+ unsigned int cpu;
+
+ /* unmap must not be done on immutable chunk */
+ WARN_ON(chunk->immutable);
+
+ /*
+ * Each flushing trial can be very expensive, issue flush on
+ * the whole region at once rather than doing it for each cpu.
+ * This could be overkill but is more scalable.
+ */
+ if (flush)
+ flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+
+ for_each_possible_cpu(cpu)
+ unmap_kernel_range_noflush(
+ pcpu_chunk_addr(chunk, cpu, page_start),
+ (page_end - page_start) << PAGE_SHIFT);
+
+ /* ditto as flush_cache_vunmap() */
+ if (flush)
+ flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+}
+
+/**
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
+ * @chunk: chunk to depopulate
+ * @off: offset to the area to depopulate
+ * @size: size of the area to depopulate in bytes
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
+ * from @chunk. If @flush is true, vcache is flushed before unmapping
+ * and tlb after.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex.
+ */
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
+ bool flush)
+{
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ int unmap_start = -1;
+ int uninitialized_var(unmap_end);
+ unsigned int cpu;
+ int i;
+
+ for (i = page_start; i < page_end; i++) {
+ for_each_possible_cpu(cpu) {
+ struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+ if (!*pagep)
+ continue;
+
+ __free_page(*pagep);
+
+ /*
+ * If it's partial depopulation, it might get
+ * populated or depopulated again. Mark the
+ * page gone.
+ */
+ *pagep = NULL;
+
+ unmap_start = unmap_start < 0 ? i : unmap_start;
+ unmap_end = i + 1;
+ }
+ }
+
+ if (unmap_start >= 0)
+ pcpu_unmap(chunk, unmap_start, unmap_end, flush);
+}
+
+/**
+ * pcpu_map - map pages into a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to map
+ * @page_end: page index of the last page to map + 1
+ *
+ * For each cpu, map pages [@page_start,@page_end) into @chunk.
+ * vcache is flushed afterwards.
+ */
+static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+{
+ unsigned int last = num_possible_cpus() - 1;
+ unsigned int cpu;
+ int err;
+
+ /* map must not be done on immutable chunk */
+ WARN_ON(chunk->immutable);
+
+ for_each_possible_cpu(cpu) {
+ err = map_kernel_range_noflush(
+ pcpu_chunk_addr(chunk, cpu, page_start),
+ (page_end - page_start) << PAGE_SHIFT,
+ PAGE_KERNEL,
+ pcpu_chunk_pagep(chunk, cpu, page_start));
+ if (err < 0)
+ return err;
+ }
+
+ /* flush at once, please read comments in pcpu_unmap() */
+ flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
+ return 0;
+}
+
+/**
+ * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @off: offset to the area to populate
+ * @size: size of the area to populate in bytes
+ *
+ * For each cpu, populate and map pages [@page_start,@page_end) into
+ * @chunk. The area is cleared on return.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, does GFP_KERNEL allocation.
+ */
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+{
+ const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ int map_start = -1;
+ int uninitialized_var(map_end);
+ unsigned int cpu;
+ int i;
+
+ for (i = page_start; i < page_end; i++) {
+ if (pcpu_chunk_page_occupied(chunk, i)) {
+ if (map_start >= 0) {
+ if (pcpu_map(chunk, map_start, map_end))
+ goto err;
+ map_start = -1;
+ }
+ continue;
+ }
+
+ map_start = map_start < 0 ? i : map_start;
+ map_end = i + 1;
+
+ for_each_possible_cpu(cpu) {
+ struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+ *pagep = alloc_pages_node(cpu_to_node(cpu),
+ alloc_mask, 0);
+ if (!*pagep)
+ goto err;
+ }
+ }
+
+ if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
+ goto err;
+
+ for_each_possible_cpu(cpu)
+ memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
+ size);
+
+ return 0;
+err:
+ /* likely under heavy memory pressure, give memory back */
+ pcpu_depopulate_chunk(chunk, off, size, true);
+ return -ENOMEM;
+}
+
+static void free_pcpu_chunk(struct pcpu_chunk *chunk)
+{
+ if (!chunk)
+ return;
+ if (chunk->vm)
+ free_vm_area(chunk->vm);
+ pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
+ kfree(chunk);
+}
+
+static struct pcpu_chunk *alloc_pcpu_chunk(void)
+{
+ struct pcpu_chunk *chunk;
+
+ chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+
+ chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+ chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
+ chunk->map[chunk->map_used++] = pcpu_unit_size;
+ chunk->page = chunk->page_ar;
+
+ chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
+ if (!chunk->vm) {
+ free_pcpu_chunk(chunk);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&chunk->list);
+ chunk->free_size = pcpu_unit_size;
+ chunk->contig_hint = pcpu_unit_size;
+
+ return chunk;
+}
+
+/**
+ * pcpu_alloc - the percpu allocator
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ * @reserved: allocate from the reserved chunk if available
+ *
+ * Allocate percpu area of @size bytes aligned at @align.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+{
+ struct pcpu_chunk *chunk;
+ int slot, off;
+
+ if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+ WARN(true, "illegal size (%zu) or align (%zu) for "
+ "percpu allocation\n", size, align);
+ return NULL;
+ }
+
+ mutex_lock(&pcpu_alloc_mutex);
+ spin_lock_irq(&pcpu_lock);
+
+ /* serve reserved allocations from the reserved chunk if available */
+ if (reserved && pcpu_reserved_chunk) {
+ chunk = pcpu_reserved_chunk;
+ if (size > chunk->contig_hint ||
+ pcpu_extend_area_map(chunk) < 0)
+ goto fail_unlock;
+ off = pcpu_alloc_area(chunk, size, align);
+ if (off >= 0)
+ goto area_found;
+ goto fail_unlock;
+ }
+
+restart:
+ /* search through normal chunks */
+ for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
+ list_for_each_entry(chunk, &pcpu_slot[slot], list) {
+ if (size > chunk->contig_hint)
+ continue;
+
+ switch (pcpu_extend_area_map(chunk)) {
+ case 0:
+ break;
+ case 1:
+ goto restart; /* pcpu_lock dropped, restart */
+ default:
+ goto fail_unlock;
+ }
+
+ off = pcpu_alloc_area(chunk, size, align);
+ if (off >= 0)
+ goto area_found;
+ }
+ }
+
+ /* hmmm... no space left, create a new chunk */
+ spin_unlock_irq(&pcpu_lock);
+
+ chunk = alloc_pcpu_chunk();
+ if (!chunk)
+ goto fail_unlock_mutex;
+
+ spin_lock_irq(&pcpu_lock);
+ pcpu_chunk_relocate(chunk, -1);
+ pcpu_chunk_addr_insert(chunk);
+ goto restart;
+
+area_found:
+ spin_unlock_irq(&pcpu_lock);
+
+ /* populate, map and clear the area */
+ if (pcpu_populate_chunk(chunk, off, size)) {
+ spin_lock_irq(&pcpu_lock);
+ pcpu_free_area(chunk, off);
+ goto fail_unlock;
+ }
+
+ mutex_unlock(&pcpu_alloc_mutex);
+
+ return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+
+fail_unlock:
+ spin_unlock_irq(&pcpu_lock);
+fail_unlock_mutex:
+ mutex_unlock(&pcpu_alloc_mutex);
+ return NULL;
+}
+
+/**
+ * __alloc_percpu - allocate dynamic percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align. Might
+ * sleep. Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_percpu(size_t size, size_t align)
+{
+ return pcpu_alloc(size, align, false);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu);
+
+/**
+ * __alloc_reserved_percpu - allocate reserved percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align from reserved
+ * percpu area if arch has set it up; otherwise, allocation is served
+ * from the same dynamic area. Might sleep. Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_reserved_percpu(size_t size, size_t align)
+{
+ return pcpu_alloc(size, align, true);
+}
+
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ *
+ * CONTEXT:
+ * workqueue context.
+ */
+static void pcpu_reclaim(struct work_struct *work)
+{
+ LIST_HEAD(todo);
+ struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+ struct pcpu_chunk *chunk, *next;
+
+ mutex_lock(&pcpu_alloc_mutex);
+ spin_lock_irq(&pcpu_lock);
+
+ list_for_each_entry_safe(chunk, next, head, list) {
+ WARN_ON(chunk->immutable);
+
+ /* spare the first one */
+ if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+ continue;
+
+ rb_erase(&chunk->rb_node, &pcpu_addr_root);
+ list_move(&chunk->list, &todo);
+ }
+
+ spin_unlock_irq(&pcpu_lock);
+ mutex_unlock(&pcpu_alloc_mutex);
+
+ list_for_each_entry_safe(chunk, next, &todo, list) {
+ pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+ free_pcpu_chunk(chunk);
+ }
+}
+
+/**
+ * free_percpu - free percpu area
+ * @ptr: pointer to area to free
+ *
+ * Free percpu area @ptr.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+void free_percpu(void *ptr)
+{
+ void *addr = __pcpu_ptr_to_addr(ptr);
+ struct pcpu_chunk *chunk;
+ unsigned long flags;
+ int off;
+
+ if (!ptr)
+ return;
+
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ chunk = pcpu_chunk_addr_search(addr);
+ off = addr - chunk->vm->addr;
+
+ pcpu_free_area(chunk, off);
+
+ /* if there is more than one fully free chunk, wake up the grim reaper */
+ if (chunk->free_size == pcpu_unit_size) {
+ struct pcpu_chunk *pos;
+
+ list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+ if (pos != chunk) {
+ schedule_work(&pcpu_reclaim_work);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+EXPORT_SYMBOL_GPL(free_percpu);
+
+/**
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
+ * @get_page_fn: callback to fetch page pointer
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ * @base_addr: mapped address, NULL for auto
+ * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ *
+ * Initialize the first percpu chunk which contains the kernel static
+ * percpu area. This function is to be called from arch percpu area
+ * setup path. The first two parameters are mandatory. The rest are
+ * optional.
+ *
+ * @get_page_fn() should return pointer to percpu page given cpu
+ * number and page number. It should at least return enough pages to
+ * cover the static area. The returned pages for static area should
+ * have been initialized with valid data. If @unit_size is specified,
+ * it can also return pages after the static area. NULL return
+ * indicates end of pages for the cpu. Note that @get_page_fn() must
+ * return the same number of pages for all cpus.
+ *
+ * @reserved_size, if non-zero, specifies the number of bytes to
+ * reserve after the static area in the first chunk. This reserves
+ * the first chunk such that it's available only through reserved
+ * percpu allocation. This is primarily used to serve module percpu
+ * static areas on architectures where the addressing model has
+ * limited offset range for symbol relocations to guarantee module
+ * percpu symbols fall inside the relocatable range.
+ *
+ * @dyn_size, if non-negative, determines the number of bytes
+ * available for dynamic allocation in the first chunk. Specifying a
+ * non-negative value makes percpu leave the area beyond @static_size +
+ * @reserved_size + @dyn_size alone.
+ *
+ * @unit_size, if non-negative, specifies the unit size and must be
+ * aligned to PAGE_SIZE and equal to or larger than @static_size +
+ * @reserved_size plus @dyn_size if @dyn_size is non-negative.
+ *
+ * Non-null @base_addr means that the caller already allocated virtual
+ * region for the first chunk and mapped it. percpu must not mess
+ * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
+ * @populate_pte_fn doesn't make any sense.
+ *
+ * @populate_pte_fn is used to populate the pagetable. NULL means the
+ * caller already populated the pagetable.
+ *
+ * If the first chunk ends up with both reserved and dynamic areas, it
+ * is served by two chunks - one to serve the core static and reserved
+ * areas and the other for the dynamic area. They share the same vm
+ * and page map but use different area allocation maps to stay away
+ * from each other. The latter chunk is circulated in the chunk slots
+ * and available for dynamic allocation like any other chunks.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access.
+ */
+size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size,
+ void *base_addr,
+ pcpu_populate_pte_fn_t populate_pte_fn)
+{
+ static struct vm_struct first_vm;
+ static int smap[2], dmap[2];
+ size_t size_sum = static_size + reserved_size +
+ (dyn_size >= 0 ? dyn_size : 0);
+ struct pcpu_chunk *schunk, *dchunk = NULL;
+ unsigned int cpu;
+ int nr_pages;
+ int err, i;
+
+ /* sanity checks */
+ BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
+ ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
+ BUG_ON(!static_size);
+ if (unit_size >= 0) {
+ BUG_ON(unit_size < size_sum);
+ BUG_ON(unit_size & ~PAGE_MASK);
+ BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+ } else
+ BUG_ON(base_addr);
+ BUG_ON(base_addr && populate_pte_fn);
+
+ if (unit_size >= 0)
+ pcpu_unit_pages = unit_size >> PAGE_SHIFT;
+ else
+ pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
+ PFN_UP(size_sum));
+
+ pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
+ pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
+ pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
+ + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
+
+ if (dyn_size < 0)
+ dyn_size = pcpu_unit_size - static_size - reserved_size;
+
+ /*
+ * Allocate chunk slots. The additional last slot is for
+ * empty chunks.
+ */
+ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+ pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+ for (i = 0; i < pcpu_nr_slots; i++)
+ INIT_LIST_HEAD(&pcpu_slot[i]);
+
+ /*
+ * Initialize static chunk. If reserved_size is zero, the
+ * static chunk covers static area + dynamic allocation area
+ * in the first chunk. If reserved_size is not zero, it
+ * covers static area + reserved area (mostly used for module
+ * static percpu allocation).
+ */
+ schunk = alloc_bootmem(pcpu_chunk_struct_size);
+ INIT_LIST_HEAD(&schunk->list);
+ schunk->vm = &first_vm;
+ schunk->map = smap;
+ schunk->map_alloc = ARRAY_SIZE(smap);
+ schunk->page = schunk->page_ar;
+
+ if (reserved_size) {
+ schunk->free_size = reserved_size;
+ pcpu_reserved_chunk = schunk; /* not for dynamic alloc */
+ } else {
+ schunk->free_size = dyn_size;
+ dyn_size = 0; /* dynamic area covered */
+ }
+ schunk->contig_hint = schunk->free_size;
+
+ schunk->map[schunk->map_used++] = -static_size;
+ if (schunk->free_size)
+ schunk->map[schunk->map_used++] = schunk->free_size;
+
+ pcpu_reserved_chunk_limit = static_size + schunk->free_size;
+
+ /* init dynamic chunk if necessary */
+ if (dyn_size) {
+ dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
+ INIT_LIST_HEAD(&dchunk->list);
+ dchunk->vm = &first_vm;
+ dchunk->map = dmap;
+ dchunk->map_alloc = ARRAY_SIZE(dmap);
+ dchunk->page = schunk->page_ar; /* share page map with schunk */
+
+ dchunk->contig_hint = dchunk->free_size = dyn_size;
+ dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
+ dchunk->map[dchunk->map_used++] = dchunk->free_size;
+ }
+
+ /* allocate vm address */
+ first_vm.flags = VM_ALLOC;
+ first_vm.size = pcpu_chunk_size;
+
+ if (!base_addr)
+ vm_area_register_early(&first_vm, PAGE_SIZE);
+ else {
+ /*
+ * Pages already mapped. No need to remap into
+ * vmalloc area. In this case the first chunks can't
+ * be mapped or unmapped by percpu and are marked
+ * immutable.
+ */
+ first_vm.addr = base_addr;
+ schunk->immutable = true;
+ if (dchunk)
+ dchunk->immutable = true;
+ }
+
+ /* assign pages */
+ nr_pages = -1;
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < pcpu_unit_pages; i++) {
+ struct page *page = get_page_fn(cpu, i);
+
+ if (!page)
+ break;
+ *pcpu_chunk_pagep(schunk, cpu, i) = page;
+ }
+
+ BUG_ON(i < PFN_UP(static_size));
+
+ if (nr_pages < 0)
+ nr_pages = i;
+ else
+ BUG_ON(nr_pages != i);
+ }
+
+ /* map them */
+ if (populate_pte_fn) {
+ for_each_possible_cpu(cpu)
+ for (i = 0; i < nr_pages; i++)
+ populate_pte_fn(pcpu_chunk_addr(schunk,
+ cpu, i));
+
+ err = pcpu_map(schunk, 0, nr_pages);
+ if (err)
+ panic("failed to setup static percpu area, err=%d\n",
+ err);
+ }
+
+ /* link the first chunk in */
+ if (!dchunk) {
+ pcpu_chunk_relocate(schunk, -1);
+ pcpu_chunk_addr_insert(schunk);
+ } else {
+ pcpu_chunk_relocate(dchunk, -1);
+ pcpu_chunk_addr_insert(dchunk);
+ }
+
+ /* we're done */
+ pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
+ return pcpu_unit_size;
+}
+
+/*
+ * Embedding first chunk setup helper.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+ size_t off = (size_t)pageno << PAGE_SHIFT;
+
+ if (off >= pcpue_size)
+ return NULL;
+
+ return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+}
+
+/**
+ * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ *
+ * This is a helper to ease setting up the embedded first percpu chunk
+ * and can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * If this function is used to set up the first chunk, it is allocated
+ * as a contiguous area using the bootmem allocator and used as-is
+ * without being mapped into the vmalloc area. This enables the first
+ * chunk to piggyback on the linear physical mapping which often uses
+ * larger page sizes.
+ *
+ * When @dyn_size is positive, dynamic area might be larger than
+ * specified to fill page alignment. Also, when @dyn_size is auto,
+ * @dyn_size does not fill the whole first chunk but only what's
+ * necessary for page alignment after static and reserved areas.
+ *
+ * If the needed size is smaller than the minimum or specified unit
+ * size, the leftover is returned to the bootmem allocator.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size)
+{
+ unsigned int cpu;
+
+ /* determine parameters and allocate */
+ pcpue_size = PFN_ALIGN(static_size + reserved_size +
+ (dyn_size >= 0 ? dyn_size : 0));
+ if (dyn_size != 0)
+ dyn_size = pcpue_size - static_size - reserved_size;
+
+ if (unit_size >= 0) {
+ BUG_ON(unit_size < pcpue_size);
+ pcpue_unit_size = unit_size;
+ } else
+ pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+
+ pcpue_ptr = __alloc_bootmem_nopanic(
+ num_possible_cpus() * pcpue_unit_size,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ if (!pcpue_ptr)
+ return -ENOMEM;
+
+ /* return the leftover and copy */
+ for_each_possible_cpu(cpu) {
+ void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+ free_bootmem(__pa(ptr + pcpue_size),
+ pcpue_unit_size - pcpue_size);
+ memcpy(ptr, __per_cpu_load, static_size);
+ }
+
+ /* we're ready, commit */
+ pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+ pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+ return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+ reserved_size, dyn_size,
+ pcpue_unit_size, pcpue_ptr, NULL);
+}
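A sketch of the arch-side call this helper is meant to simplify (the reserved size of 8 KiB and the panic message are illustrative values, not taken from any particular architecture):

extern char __per_cpu_start[], __per_cpu_end[];

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;

	/* reserve some room for module percpu variables, auto-size the rest */
	unit_size = pcpu_embed_first_chunk(static_size, 8 << 10, -1, -1);
	if (unit_size < 0)
		panic("percpu: cannot set up first chunk");

	/* arch code would now program its percpu base offsets using unit_size */
}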
diff --git a/mm/readahead.c b/mm/readahead.c
index bec83c15a78f..133b6d525513 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,19 +17,6 @@
#include <linux/pagevec.h>
#include <linux/pagemap.h>
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
-struct backing_dev_info default_backing_dev_info = {
- .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
- .state = 0,
- .capabilities = BDI_CAP_MAP_COPY,
- .unplug_io_fn = default_unplug_io_fn,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
/*
* Initialise a struct file's readahead state. Assumes that the caller has
* memset *ra to zero.
@@ -44,6 +31,42 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+/*
+ * see if a page needs releasing upon read_cache_pages() failure
+ * - the caller of read_cache_pages() may have set PG_private or PG_fscache
+ *   before calling (such as the NFS fs marking pages that are cached locally
+ *   on disk), so we need to give the fs a chance to clean up in the event
+ *   of an error
+ */
+static void read_cache_pages_invalidate_page(struct address_space *mapping,
+ struct page *page)
+{
+ if (page_has_private(page)) {
+ if (!trylock_page(page))
+ BUG();
+ page->mapping = mapping;
+ do_invalidatepage(page, 0);
+ page->mapping = NULL;
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
+/*
+ * release a list of pages, invalidating them first if need be
+ */
+static void read_cache_pages_invalidate_pages(struct address_space *mapping,
+ struct list_head *pages)
+{
+ struct page *victim;
+
+ while (!list_empty(pages)) {
+ victim = list_to_page(pages);
+ list_del(&victim->lru);
+ read_cache_pages_invalidate_page(mapping, victim);
+ }
+}
+
/**
* read_cache_pages - populate an address space with some pages & start reads against them
* @mapping: the address_space
@@ -65,14 +88,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL)) {
- page_cache_release(page);
+ read_cache_pages_invalidate_page(mapping, page);
continue;
}
page_cache_release(page);
ret = filler(data, page);
if (unlikely(ret)) {
- put_pages_list(pages);
+ read_cache_pages_invalidate_pages(mapping, pages);
break;
}
task_io_account_read(PAGE_CACHE_SIZE);
@@ -233,18 +256,6 @@ unsigned long max_sane_readahead(unsigned long nr)
+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
-static int __init readahead_init(void)
-{
- int err;
-
- err = bdi_init(&default_backing_dev_info);
- if (!err)
- bdi_register(&default_backing_dev_info, NULL, "default");
-
- return err;
-}
-subsys_initcall(readahead_init);
-
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a239ce84..d94d2e9146bc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@@ -1067,8 +1068,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
swap_duplicate(swap);
BUG_ON(page_mapped(page));
page_cache_release(page); /* pagecache ref */
- set_page_dirty(page);
- unlock_page(page);
+ swap_writepage(page, wbc);
if (inode) {
mutex_lock(&shmem_swaplist_mutex);
/* move instead of add in case we're racing */
@@ -2665,6 +2665,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
+ ima_shm_check(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;
diff --git a/mm/slab.c b/mm/slab.c
index 4d00855629c4..208323fd37bc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
+ lockdep_trace_alloc(flags);
+
if (slab_should_failslab(cachep, flags))
return NULL;
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;
+ lockdep_trace_alloc(flags);
+
if (slab_should_failslab(cachep, flags))
return NULL;
@@ -3988,8 +3992,7 @@ static void cache_reap(struct work_struct *w)
struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
- struct delayed_work *work =
- container_of(w, struct delayed_work, work);
+ struct delayed_work *work = to_delayed_work(w);
if (!mutex_trylock(&cache_chain_mutex))
/* Give up. Setup the next iteration. */
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..7a3411524dac 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
return PageSlobPage((struct page *)sp);
}
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
__ClearPageSlobPage((struct page *)sp);
}
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
+}
+
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
free_page((unsigned long)b);
- goto out;
+ return;
}
if (!slob_page_free(sp)) {
@@ -464,6 +475,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ lockdep_trace_alloc(gfp);
+
if (size < PAGE_SIZE - align) {
if (!size)
return ZERO_SIZE_PTR;
@@ -476,7 +489,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else {
void *ret;
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
@@ -494,8 +507,8 @@ void kfree(const void *block)
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
@@ -513,8 +526,8 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -573,7 +586,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align, node);
else
- b = slob_new_page(flags, get_order(c->size), node);
+ b = slob_new_pages(flags, get_order(c->size), node);
if (c->ctor)
c->ctor(b);
@@ -587,7 +600,7 @@ static void __kmem_cache_free(void *b, int size)
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..c4ea9158c9fb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
- struct track *p;
-
- if (s->offset)
- p = object + s->offset + sizeof(void *);
- else
- p = object + s->inuse;
+ struct track *p = get_track(s, object, alloc);
- p += alloc;
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
n = get_node(s, zone_to_nid(zone));
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > n->min_partial) {
+ n->nr_partial > s->min_partial) {
page = get_partial_node(n);
if (page)
return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
stat(c, DEACTIVATE_EMPTY);
- if (n->nr_partial < n->min_partial) {
+ if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
* to avoid page allocator overhead. This slab needs
@@ -1596,6 +1590,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
unsigned long flags;
unsigned int objsize;
+ lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
if (should_failslab(s->objsize, gfpflags))
@@ -1724,7 +1719,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
+ debug_check_no_obj_freed(object, c->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
@@ -1844,6 +1839,7 @@ static inline int calculate_order(int size)
int order;
int min_objects;
int fraction;
+ int max_objects;
/*
* Attempt to find best configuration for a slab. This
@@ -1856,6 +1852,9 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
+ max_objects = (PAGE_SIZE << slub_max_order)/size;
+ min_objects = min(min_objects, max_objects);
+
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
@@ -1865,7 +1864,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
- min_objects /= 2;
+ min_objects--;
}
/*
@@ -1928,17 +1927,6 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
n->nr_partial = 0;
-
- /*
- * The larger the object size is, the more pages we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- n->min_partial = ilog2(s->size);
- if (n->min_partial < MIN_PARTIAL)
- n->min_partial = MIN_PARTIAL;
- else if (n->min_partial > MAX_PARTIAL)
- n->min_partial = MAX_PARTIAL;
-
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2169,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
}
#endif
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+ if (min < MIN_PARTIAL)
+ min = MIN_PARTIAL;
+ else if (min > MAX_PARTIAL)
+ min = MAX_PARTIAL;
+ s->min_partial = min;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -2319,6 +2316,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
+ /*
+ * The larger the object size is, the more pages we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
@@ -2475,7 +2477,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2539,7 @@ panic:
}
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
@@ -2658,7 +2660,7 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2686,7 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, flags, node);
s = get_slab(size, flags);
@@ -2986,7 +2988,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3023,7 +3025,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
kmalloc_caches[i].name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3223,7 +3225,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
@@ -3239,7 +3241,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
@@ -3836,6 +3838,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR(order);
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ unsigned long min;
+ int err;
+
+ err = strict_strtoul(buf, 10, &min);
+ if (err)
+ return err;
+
+ set_min_partial(s, min);
+ return length;
+}
+SLAB_ATTR(min_partial);
+
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (s->ctor) {
@@ -4151,6 +4173,7 @@ static struct attribute *slab_attrs[] = {
&object_size_attr.attr,
&objs_per_slab_attr.attr,
&order_attr.attr,
+ &min_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&total_objects_attr.attr,
diff --git a/mm/sparse.c b/mm/sparse.c
index 083f5b63e7a8..da432d9f0ae8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -164,9 +164,7 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
WARN_ON_ONCE(1);
*start_pfn = max_sparsemem_pfn;
*end_pfn = max_sparsemem_pfn;
- }
-
- if (*end_pfn > max_sparsemem_pfn) {
+ } else if (*end_pfn > max_sparsemem_pfn) {
mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
*start_pfn, *end_pfn, max_sparsemem_pfn);
diff --git a/mm/swap.c b/mm/swap.c
index 8adb9feb61e1..bede23ce64ea 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -448,8 +448,8 @@ void pagevec_strip(struct pagevec *pvec)
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- if (PagePrivate(page) && trylock_page(page)) {
- if (PagePrivate(page))
+ if (page_has_private(page) && trylock_page(page)) {
+ if (page_has_private(page))
try_to_release_page(page, 0);
unlock_page(page);
}
@@ -457,29 +457,6 @@ void pagevec_strip(struct pagevec *pvec)
}
/**
- * pagevec_swap_free - try to free swap space from the pages in a pagevec
- * @pvec: pagevec with swapcache pages to free the swap space of
- *
- * The caller needs to hold an extra reference to each page and
- * not hold the page lock on the pages. This function uses a
- * trylock on the page lock so it may not always free the swap
- * space associated with a page.
- */
-void pagevec_swap_free(struct pagevec *pvec)
-{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
-
- if (PageSwapCache(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
- }
- }
-}
-
-/**
* pagevec_lookup - gang pagecache lookup
* @pvec: Where the resulting pages are placed
* @mapping: The address_space to search
diff --git a/mm/truncate.c b/mm/truncate.c
index 1229211104f8..55206fab7b99 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -50,7 +50,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
zero_user_segment(page, partial, PAGE_CACHE_SIZE);
- if (PagePrivate(page))
+ if (page_has_private(page))
do_invalidatepage(page, partial);
}
@@ -99,7 +99,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return;
- if (PagePrivate(page))
+ if (page_has_private(page))
do_invalidatepage(page, 0);
cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -126,7 +126,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return 0;
- if (PagePrivate(page) && !try_to_release_page(page, 0))
+ if (page_has_private(page) && !try_to_release_page(page, 0))
return 0;
clear_page_mlock(page);
@@ -348,7 +348,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return 0;
- if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
return 0;
spin_lock_irq(&mapping->tree_lock);
@@ -356,7 +356,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
goto failed;
clear_page_mlock(page);
- BUG_ON(PagePrivate(page));
+ BUG_ON(page_has_private(page));
__remove_from_page_cache(page);
spin_unlock_irq(&mapping->tree_lock);
page_cache_release(page); /* pagecache ref */
diff --git a/mm/util.c b/mm/util.c
index 37eaccdf3054..7c122e49f769 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -70,6 +70,36 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
EXPORT_SYMBOL(kmemdup);
/**
+ * memdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure.
+ */
+void *memdup_user(const void __user *src, size_t len)
+{
+ void *p;
+
+ /*
+ * Always use GFP_KERNEL, since copy_from_user() can sleep and
+ * cause a page fault, which makes it pointless to use GFP_NOFS
+ * or GFP_ATOMIC.
+ */
+ p = kmalloc_track_caller(len, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return p;
+}
+EXPORT_SYMBOL(memdup_user);
+
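/*
 * Editor's sketch, not part of the patch: a typical memdup_user() caller,
 * e.g. an ioctl handler pulling in a user buffer.  "my_ioctl_set" and the
 * PAGE_SIZE length cap are hypothetical.
 */
static int my_ioctl_set(const void __user *ubuf, size_t len)
{
	void *buf;

	if (len > PAGE_SIZE)		/* arbitrary sanity limit */
		return -EINVAL;

	buf = memdup_user(ubuf, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... consume buf ... */
	kfree(buf);
	return 0;
}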
+/**
* __krealloc - like krealloc() but don't free @p.
* @p: object to reallocate memory for.
* @new_size: how many bytes of memory are required.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 520a75980269..fab19876b4d1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
*
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
-static int vmap_page_range(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
{
pgd_t *pgd;
unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
if (err)
break;
} while (pgd++, addr = next, addr != end);
- flush_cache_vmap(start, end);
if (unlikely(err))
return err;
return nr;
}
+static int vmap_page_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
+{
+ int ret;
+
+ ret = vmap_page_range_noflush(start, end, prot, pages);
+ flush_cache_vmap(start, end);
+ return ret;
+}
+
static inline int is_vmalloc_or_module_addr(const void *x)
{
/*
@@ -661,10 +671,7 @@ struct vmap_block {
DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
union {
- struct {
- struct list_head free_list;
- struct list_head dirty_list;
- };
+ struct list_head free_list;
struct rcu_head rcu_head;
};
};
@@ -731,7 +738,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
INIT_LIST_HEAD(&vb->free_list);
- INIT_LIST_HEAD(&vb->dirty_list);
vb_idx = addr_to_vb_idx(va->va_start);
spin_lock(&vmap_block_tree_lock);
@@ -762,12 +768,7 @@ static void free_vmap_block(struct vmap_block *vb)
struct vmap_block *tmp;
unsigned long vb_idx;
- spin_lock(&vb->vbq->lock);
- if (!list_empty(&vb->free_list))
- list_del(&vb->free_list);
- if (!list_empty(&vb->dirty_list))
- list_del(&vb->dirty_list);
- spin_unlock(&vb->vbq->lock);
+ BUG_ON(!list_empty(&vb->free_list));
vb_idx = addr_to_vb_idx(vb->va->va_start);
spin_lock(&vmap_block_tree_lock);
@@ -852,11 +853,7 @@ static void vb_free(const void *addr, unsigned long size)
spin_lock(&vb->lock);
bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
- if (!vb->dirty) {
- spin_lock(&vb->vbq->lock);
- list_add(&vb->dirty_list, &vb->vbq->dirty);
- spin_unlock(&vb->vbq->lock);
- }
+
vb->dirty += 1UL << order;
if (vb->dirty == VMAP_BBMAP_BITS) {
BUG_ON(vb->free || !list_empty(&vb->free_list));
@@ -990,6 +987,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
}
EXPORT_SYMBOL(vm_map_ram);
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register kernel vm area before
+ * vmalloc_init() is called. @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero. On return,
+ * vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+ static size_t vm_init_off __initdata;
+ unsigned long addr;
+
+ addr = ALIGN(VMALLOC_START + vm_init_off, align);
+ vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+ vm->addr = (void *)addr;
+
+ vm->next = vmlist;
+ vmlist = vm;
+}
+
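/*
 * Editor's sketch, not part of the patch: how early boot code (the dynamic
 * percpu allocator is the intended user) can claim a chunk of vmalloc space
 * before vmalloc_init() runs.  The name, size and flags below are made up.
 */
static struct vm_struct my_early_vm;

static void __init my_reserve_early_area(void)
{
	my_early_vm.flags = VM_ALLOC;
	my_early_vm.size  = 4 * PAGE_SIZE;	/* hypothetical size */
	vm_area_register_early(&my_early_vm, PAGE_SIZE);
	/* my_early_vm.addr now holds the address picked for the area */
}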
void __init vmalloc_init(void)
{
struct vmap_area *va;
@@ -1017,6 +1040,58 @@ void __init vmalloc_init(void)
vmap_initialized = true;
}
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+ return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+ vunmap_page_range(addr, addr + size);
+}
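/*
 * Editor's sketch, not part of the patch: the flushing discipline the
 * comment above asks of callers, spelled out for a hypothetical helper.
 * unmap_kernel_range() below wraps exactly this sequence.
 */
static void my_unmap_area(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);		/* cache, before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, end);	/* TLB, after unmapping */
}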
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;
@@ -1267,6 +1342,7 @@ EXPORT_SYMBOL(vfree);
void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
+ might_sleep();
__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
@@ -1286,6 +1362,8 @@ void *vmap(struct page **pages, unsigned int count,
{
struct vm_struct *area;
+ might_sleep();
+
if (count > num_physpages)
return NULL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56ddf41149eb..425244988bb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,8 +60,8 @@ struct scan_control {
int may_writepage;
- /* Can pages be swapped as part of reclaim? */
- int may_swap;
+ /* Can mapped pages be reclaimed? */
+ int may_unmap;
/* This context's SWAP_CLUSTER_MAX. If freeing memory for
* suspend, we effectively ignore SWAP_CLUSTER_MAX.
@@ -78,6 +78,12 @@ struct scan_control {
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
+ /*
+ * Nodemask of nodes allowed by the caller. If NULL, all nodes
+ * are scanned.
+ */
+ nodemask_t *nodemask;
+
/* Pluggable isolate pages callback */
unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
unsigned long *scanned, int order, int mode,
@@ -214,8 +220,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0) {
- printk(KERN_ERR "%s: nr=%ld\n",
- __func__, shrinker->nr);
+ printk(KERN_ERR "shrink_slab: %pF negative objects to "
+ "delete nr=%ld\n",
+ shrinker->shrink, shrinker->nr);
shrinker->nr = max_pass;
}
@@ -276,7 +283,7 @@ static inline int page_mapping_inuse(struct page *page)
static inline int is_page_cache_freeable(struct page *page)
{
- return page_count(page) - !!PagePrivate(page) == 2;
+ return page_count(page) - !!page_has_private(page) == 2;
}
static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -360,7 +367,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* Some data journaling orphaned pages can have
* page->mapping == NULL while being dirty with clean buffers.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (try_to_free_buffers(page)) {
ClearPageDirty(page);
printk("%s: orphaned page\n", __func__);
@@ -606,7 +613,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (unlikely(!page_evictable(page, NULL)))
goto cull_mlocked;
- if (!sc->may_swap && page_mapped(page))
+ if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
/* Double the slab pressure for mapped and swapcache pages */
@@ -720,7 +727,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* process address space (page_count == 1) it can be freed.
* Otherwise, leave the page on the LRU so it is swappable.
*/
- if (PagePrivate(page)) {
+ if (page_has_private(page)) {
if (!try_to_release_page(page, sc->gfp_mask))
goto activate_locked;
if (!mapping && page_count(page) == 1) {
@@ -1298,17 +1305,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
}
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
pgdeactivate += pgmoved;
- if (buffer_heads_over_limit) {
- spin_unlock_irq(&zone->lru_lock);
- pagevec_strip(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
spin_unlock_irq(&zone->lru_lock);
- if (vm_swap_full())
- pagevec_swap_free(&pvec);
-
+ if (buffer_heads_over_limit)
+ pagevec_strip(&pvec);
pagevec_release(&pvec);
}
@@ -1543,7 +1544,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
struct zone *zone;
sc->all_unreclaimable = 1;
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+ sc->nodemask) {
if (!populated_zone(zone))
continue;
/*
@@ -1688,17 +1690,18 @@ out:
}
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, nodemask_t *nodemask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
- .may_swap = 1,
+ .may_unmap = 1,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
.isolate_pages = isolate_pages_global,
+ .nodemask = nodemask,
};
return do_try_to_free_pages(zonelist, &sc);
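/*
 * Editor's sketch, not part of the patch: with the new @nodemask argument a
 * caller can confine direct reclaim to particular nodes, while passing NULL
 * keeps the old scan-everything behaviour.  "my_reclaim_local_node" is a
 * hypothetical wrapper.
 */
static unsigned long my_reclaim_local_node(gfp_t gfp_mask, int order)
{
	int nid = numa_node_id();
	nodemask_t mask = NODE_MASK_NONE;

	node_set(nid, mask);
	return try_to_free_pages(node_zonelist(nid, gfp_mask), order,
				 gfp_mask, &mask);
}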
@@ -1713,17 +1716,18 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
{
struct scan_control sc = {
.may_writepage = !laptop_mode,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
+ .nodemask = NULL, /* we don't care about placement */
};
struct zonelist *zonelist;
if (noswap)
- sc.may_swap = 0;
+ sc.may_unmap = 0;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1762,7 +1766,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
.order = order,
@@ -1965,6 +1969,8 @@ static int kswapd(void *p)
};
node_to_cpumask_ptr(cpumask, pgdat->node_id);
+ lockdep_set_current_reclaim_state(GFP_KERNEL);
+
if (!cpumask_empty(cpumask))
set_cpus_allowed_ptr(tsk, cpumask);
current->reclaim_state = &reclaim_state;
@@ -2048,22 +2054,19 @@ unsigned long global_lru_pages(void)
#ifdef CONFIG_PM
/*
* Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for the given pass and priority.
*
* For pass > 3 we also try to shrink the LRU lists that contain a few pages
*/
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long ret = 0;
+ unsigned long nr_reclaimed = 0;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
enum lru_list l;
- if (!populated_zone(zone))
- continue;
if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
continue;
@@ -2082,14 +2085,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
zone->lru[l].nr_scan = 0;
nr_to_scan = min(nr_pages, lru_pages);
- ret += shrink_list(l, nr_to_scan, zone,
+ nr_reclaimed += shrink_list(l, nr_to_scan, zone,
sc, prio);
- if (ret >= nr_pages)
- return ret;
+ if (nr_reclaimed >= nr_pages) {
+ sc->nr_reclaimed = nr_reclaimed;
+ return;
+ }
}
}
}
- return ret;
+ sc->nr_reclaimed = nr_reclaimed;
}
/*
@@ -2103,13 +2108,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
unsigned long shrink_all_memory(unsigned long nr_pages)
{
unsigned long lru_pages, nr_slab;
- unsigned long ret = 0;
int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 0,
- .swap_cluster_max = nr_pages,
+ .may_unmap = 0,
.may_writepage = 1,
.isolate_pages = isolate_pages_global,
};
@@ -2125,8 +2128,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
if (!reclaim_state.reclaimed_slab)
break;
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
nr_slab -= reclaim_state.reclaimed_slab;
@@ -2145,21 +2148,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
/* Force reclaiming mapped pages in the passes #3 and #4 */
if (pass > 2)
- sc.may_swap = 1;
+ sc.may_unmap = 1;
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - ret;
+ unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
sc.nr_scanned = 0;
- ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (ret >= nr_pages)
+ sc.swap_cluster_max = nr_to_scan;
+ shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
reclaim_state.reclaimed_slab = 0;
shrink_slab(sc.nr_scanned, sc.gfp_mask,
global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2168,21 +2172,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
}
/*
- * If ret = 0, we could not shrink LRUs, but there may be something
- * in slab caches
+ * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+ * something in slab caches
*/
- if (!ret) {
+ if (!sc.nr_reclaimed) {
do {
reclaim_state.reclaimed_slab = 0;
shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ } while (sc.nr_reclaimed < nr_pages &&
+ reclaim_state.reclaimed_slab > 0);
}
+
out:
current->reclaim_state = NULL;
- return ret;
+ return sc.nr_reclaimed;
}
#endif
@@ -2288,11 +2294,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
- .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+ .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
+ .order = order,
.isolate_pages = isolate_pages_global,
};
unsigned long slab_reclaimable;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8d..66f6130976cb 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- for_each_cpu_mask_nr(cpu, *cpumask) {
+ for_each_cpu(cpu, cpumask) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
int cpu;
int threshold;
- for_each_zone(zone) {
-
- if (!zone->present_pages)
- continue;
-
+ for_each_populated_zone(zone) {
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
- if (!populated_zone(zone))
- continue;
-
p = zone_pcp(zone, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
@@ -898,7 +891,7 @@ static void vmstat_update(struct work_struct *w)
{
refresh_cpu_vm_stats(smp_processor_id());
schedule_delayed_work(&__get_cpu_var(vmstat_work),
- sysctl_stat_interval);
+ round_jiffies_relative(sysctl_stat_interval));
}
static void __cpuinit start_cpu_timer(int cpu)
@@ -906,7 +899,8 @@ static void __cpuinit start_cpu_timer(int cpu)
struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
- schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+ schedule_delayed_work_on(cpu, vmstat_work,
+ __round_jiffies_relative(HZ, cpu));
}
/*