From 89e004ea55abe201b29e2d6e35124101f1288ef7 Mon Sep 17 00:00:00 2001
From: Lee Schermerhorn
Date: Sat, 18 Oct 2008 20:26:43 -0700
Subject: SHM_LOCKED pages are unevictable

Shmem segments locked into memory via shmctl(SHM_LOCK) should not be kept
on the normal LRU, since scanning them is a waste of time and might throw
off kswapd's balancing algorithms.  Place them on the unevictable LRU list
instead.

Use the AS_UNEVICTABLE flag to mark the address_space of SHM_LOCKed shared
memory regions as unevictable.  These pages will then be culled off the
normal LRU lists during vmscan.

Add a new wrapper function, mapping_clear_unevictable(), to clear the
mapping's unevictable state when/if the shared memory segment is unlocked.

Add scan_mapping_unevictable_pages() to mm/vmscan.c to scan all pages in
the shmem segment's mapping [struct address_space] for evictability now
that they're no longer locked.  Pages found to be evictable are moved to
the appropriate zone lru list.

Changes depend on [CONFIG_]UNEVICTABLE_LRU.

[kosaki.motohiro@jp.fujitsu.com: revert shm change]
Signed-off-by: Lee Schermerhorn
Signed-off-by: Rik van Riel
Signed-off-by: Kosaki Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
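Notes (illustrative, not part of the commit):

For reference, the user-visible path that exercises this change is
shmctl(SHM_LOCK)/shmctl(SHM_UNLOCK) on a SysV shm segment.  A minimal
user-space sketch, assuming CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK:

  #include <stdio.h>
  #include <string.h>
  #include <sys/ipc.h>
  #include <sys/shm.h>

  int main(void)
  {
          size_t size = 4 * 1024 * 1024;
          int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
          char *p;

          if (id < 0) {
                  perror("shmget");
                  return 1;
          }
          p = shmat(id, NULL, 0);
          if (p == (void *)-1) {
                  perror("shmat");
                  return 1;
          }
          memset(p, 0x5a, size);  /* fault the pages in */

          /* shmem_lock() marks the mapping AS_UNEVICTABLE; vmscan culls
           * the pages onto the unevictable LRU as it encounters them. */
          if (shmctl(id, SHM_LOCK, NULL))
                  perror("shmctl(SHM_LOCK)");

          /* shmem_lock() clears AS_UNEVICTABLE and rescues the pages via
           * scan_mapping_unevictable_pages(). */
          if (shmctl(id, SHM_UNLOCK, NULL))
                  perror("shmctl(SHM_UNLOCK)");

          shmdt(p);
          shmctl(id, IPC_RMID, NULL);
          return 0;
  }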
 include/linux/mm.h      |  4 +--
 include/linux/pagemap.h | 12 +++++--
 include/linux/swap.h    |  4 +++
 ipc/shm.c               |  4 +++
 mm/shmem.c              |  4 +++
 mm/vmscan.c             | 89 +++++++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 112 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10768ea..40236290e2ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -700,10 +700,10 @@ static inline int page_mapped(struct page *page)
 extern void show_free_areas(void);
 
 #ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 static inline int shmem_lock(struct file *file, int lock,
-                        struct user_struct *user)
+                             struct user_struct *user)
 {
         return 0;
 }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09164d2c5c27..4b6c4d8d26b8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,14 +40,20 @@ static inline void mapping_set_unevictable(struct address_space *mapping)
         set_bit(AS_UNEVICTABLE, &mapping->flags);
 }
 
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+        clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
 static inline int mapping_unevictable(struct address_space *mapping)
 {
-        if (mapping && (mapping->flags & AS_UNEVICTABLE))
-                return 1;
-        return 0;
+        if (likely(mapping))
+                return test_bit(AS_UNEVICTABLE, &mapping->flags);
+        return !!mapping;
 }
 #else
 static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
 static inline int mapping_unevictable(struct address_space *mapping)
 {
         return 0;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a2113044d20a..7edb4cbc29f9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -232,12 +232,16 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
 #else
 static inline int page_evictable(struct page *page,
                                         struct vm_area_struct *vma)
 {
         return 1;
 }
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
 #endif
 
 extern int kswapd_run(int nid);
diff --git a/ipc/shm.c b/ipc/shm.c
index e77ec698cf40..0add3fa5f547 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -737,6 +737,10 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
         case SHM_LOCK:
         case SHM_UNLOCK:
         {
+                struct file *uninitialized_var(shm_file);
+
+                lru_add_drain_all();  /* drain pagevecs to lru lists */
+
                 shp = shm_lock_check(ns, shmid);
                 if (IS_ERR(shp)) {
                         err = PTR_ERR(shp);
diff --git a/mm/shmem.c b/mm/shmem.c
index fc2ccf79a776..d38d7e61fcd0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1477,12 +1477,16 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                 if (!user_shm_lock(inode->i_size, user))
                         goto out_nomem;
                 info->flags |= VM_LOCKED;
+                mapping_set_unevictable(file->f_mapping);
         }
         if (!lock && (info->flags & VM_LOCKED) && user) {
                 user_shm_unlock(inode->i_size, user);
                 info->flags &= ~VM_LOCKED;
+                mapping_clear_unevictable(file->f_mapping);
+                scan_mapping_unevictable_pages(file->f_mapping);
         }
         retval = 0;
+
 out_nomem:
         spin_unlock(&info->lock);
         return retval;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9babfbc1ddc8..dfb342e0db9b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2346,4 +2346,93 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 
         return 1;
 }
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+        VM_BUG_ON(PageActive(page));
+
+retry:
+        ClearPageUnevictable(page);
+        if (page_evictable(page, NULL)) {
+                enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+                __dec_zone_state(zone, NR_UNEVICTABLE);
+                list_move(&page->lru, &zone->lru[l].list);
+                __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+                __count_vm_event(UNEVICTABLE_PGRESCUED);
+        } else {
+                /*
+                 * rotate unevictable list
+                 */
+                SetPageUnevictable(page);
+                list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+                if (page_evictable(page, NULL))
+                        goto retry;
+        }
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+        pgoff_t next = 0;
+        pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+                        PAGE_CACHE_SHIFT;
+        struct zone *zone;
+        struct pagevec pvec;
+
+        if (mapping->nrpages == 0)
+                return;
+
+        pagevec_init(&pvec, 0);
+        while (next < end &&
+                pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+                int i;
+                int pg_scanned = 0;
+
+                zone = NULL;
+
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        struct page *page = pvec.pages[i];
+                        pgoff_t page_index = page->index;
+                        struct zone *pagezone = page_zone(page);
+
+                        pg_scanned++;
+                        if (page_index > next)
+                                next = page_index;
+                        next++;
+
+                        if (pagezone != zone) {
+                                if (zone)
+                                        spin_unlock_irq(&zone->lru_lock);
+                                zone = pagezone;
+                                spin_lock_irq(&zone->lru_lock);
+                        }
+
+                        if (PageLRU(page) && PageUnevictable(page))
+                                check_move_unevictable_page(page, zone);
+                }
+                if (zone)
+                        spin_unlock_irq(&zone->lru_lock);
+                pagevec_release(&pvec);
+
+                count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+        }
+
+}
 #endif
-- 
cgit v1.2.3
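A quick way to observe the effect: the unevictable-LRU series (with
CONFIG_UNEVICTABLE_LRU=y) also exports an "Unevictable:" field in
/proc/meminfo, which rises while a segment is SHM_LOCKed and falls again
once SHM_UNLOCK lets scan_mapping_unevictable_pages() rescue the pages.
A small watcher sketch (illustrative only, not part of this patch):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char line[256];
          FILE *f = fopen("/proc/meminfo", "r");

          if (!f) {
                  perror("/proc/meminfo");
                  return 1;
          }
          /* print only the Unevictable: counter */
          while (fgets(line, sizeof(line), f))
                  if (!strncmp(line, "Unevictable:", 12))
                          fputs(line, stdout);
          fclose(f);
          return 0;
  }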