Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--  mm/zsmalloc.c  74
1 file changed, 26 insertions(+), 48 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 702bc3fd687a..3aed46ab7e6c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -73,13 +73,6 @@
*/
#define ZS_ALIGN 8
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
#define ZS_HANDLE_SIZE (sizeof(unsigned long))
/*
@@ -136,10 +129,13 @@
#define HUGE_BITS 1
#define FULLNESS_BITS 2
#define CLASS_BITS 8
-#define ISOLATED_BITS 3
+#define ISOLATED_BITS 5
#define MAGIC_VAL_BITS 8
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
+
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
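The old cap was 1UL << ZS_MAX_ZSPAGE_ORDER = 4 pages per zspage; the cap now comes straight from CONFIG_ZSMALLOC_CHAIN_SIZE, and ISOLATED_BITS grows from 3 to 5, presumably so the per-zspage isolated-page counter can cover the longer chains this permits. A standalone sketch of the sizing arithmetic follows; PAGE_SHIFT = 12, OBJ_INDEX_BITS = 29 and a chain size of 8 are assumed typical 64-bit values, not taken from this diff:

#include <stdio.h>

/* Assumed values (typical x86-64 config), not part of the patch itself. */
#define PAGE_SHIFT      12
#define OBJ_INDEX_BITS  29  /* BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS on common configs */
#define CHAIN_SIZE      8   /* assumed CONFIG_ZSMALLOC_CHAIN_SIZE */
#define MAX(a, b) ((a) >= (b) ? (a) : (b))

int main(void)
{
	unsigned long old_cap = 1UL << 2;   /* old: 2^ZS_MAX_ZSPAGE_ORDER */
	unsigned long new_cap = CHAIN_SIZE; /* new: taken from Kconfig */
	unsigned long min_alloc = MAX(32, (new_cap << PAGE_SHIFT) >> OBJ_INDEX_BITS);

	printf("cap: %lu -> %lu pages per zspage\n", old_cap, new_cap);
	/* (8 << 12) >> 29 == 0, so the MAX() clamps ZS_MIN_ALLOC_SIZE to 32 */
	printf("ZS_MIN_ALLOC_SIZE = %lu\n", min_alloc);
	return 0;
}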
@@ -822,42 +818,6 @@ out:
return newfg;
}
-/*
- * We have to decide on how many pages to link together
- * to form a zspage for each size class. This is important
- * to reduce wastage due to unusable space left at end of
- * each zspage which is given as:
- * wastage = Zp % class_size
- * usage = Zp - wastage
- * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
- *
- * For example, for size class of 3/8 * PAGE_SIZE, we should
- * link together 3 PAGE_SIZE sized pages to form a zspage
- * since then we can perfectly fit in 8 such objects.
- */
-static int get_pages_per_zspage(int class_size)
-{
- int i, max_usedpc = 0;
- /* zspage order which gives maximum used size per KB */
- int max_usedpc_order = 1;
-
- for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
- int zspage_size;
- int waste, usedpc;
-
- zspage_size = i * PAGE_SIZE;
- waste = zspage_size % class_size;
- usedpc = (zspage_size - waste) * 100 / zspage_size;
-
- if (usedpc > max_usedpc) {
- max_usedpc = usedpc;
- max_usedpc_order = i;
- }
- }
-
- return max_usedpc_order;
-}
-
static struct zspage *get_zspage(struct page *page)
{
struct zspage *zspage = (struct zspage *)page_private(page);
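To make the deleted heuristic concrete, assuming 4 KiB pages: for the 3/8 * PAGE_SIZE class (1536 bytes), waste = k * 4096 % 1536 comes to 1024, 512, 0 and 1024 bytes for k = 1..4, i.e. 75%, 93.75%, 100% and 93.75% usage, so the loop returned k = 3, exactly the case the deleted comment describes. The replacement, calculate_zspage_chain_size() further down, reaches the same answer for this class by minimizing absolute waste instead of maximizing the used percentage.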
@@ -2056,7 +2016,6 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
* Page is locked so zspage couldn't be destroyed. For detail, look at
* lock_zspage in free_zspage.
*/
- VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(PageIsolated(page), page);
zspage = get_zspage(page);
@@ -2088,7 +2047,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
- VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(!PageIsolated(page), page);
/* The page is locked, so this pointer must remain valid */
@@ -2153,7 +2111,6 @@ static void zs_page_putback(struct page *page)
{
struct zspage *zspage;
- VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(!PageIsolated(page), page);
zspage = get_zspage(page);
@@ -2404,6 +2361,27 @@ static int zs_register_shrinker(struct zs_pool *pool)
pool->name);
}
+static int calculate_zspage_chain_size(int class_size)
+{
+ int i, min_waste = INT_MAX;
+ int chain_size = 1;
+
+ if (is_power_of_2(class_size))
+ return chain_size;
+
+ for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+ int waste;
+
+ waste = (i * PAGE_SIZE) % class_size;
+ if (waste < min_waste) {
+ min_waste = waste;
+ chain_size = i;
+ }
+ }
+
+ return chain_size;
+}
+
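A standalone replay of the new helper (chain_size_for() below mirrors the patch's calculate_zspage_chain_size()) shows where behavior actually changes; the 4 KiB page size and the 8-page limit are assumptions, since the real limit comes from CONFIG_ZSMALLOC_CHAIN_SIZE in Kconfig, not this file:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define MAX_CHAIN  8   /* assumed CONFIG_ZSMALLOC_CHAIN_SIZE */

static int is_power_of_2(int n)
{
	return n > 0 && (n & (n - 1)) == 0;
}

static int chain_size_for(int class_size)
{
	int i, min_waste = INT_MAX, chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;  /* one page already packs with zero waste */

	for (i = 1; i <= MAX_CHAIN; i++) {
		int waste = (i * PAGE_SIZE) % class_size;

		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}
	return chain_size;
}

int main(void)
{
	printf("1536 -> %d\n", chain_size_for(1536)); /* 3, same as the old heuristic */
	printf("2048 -> %d\n", chain_size_for(2048)); /* 1 via the power-of-2 shortcut */
	printf("2560 -> %d\n", chain_size_for(2560)); /* 5: zero waste, unreachable under the old 4-page cap */
	return 0;
}

Two design points stand out: power-of-two classes divide a page evenly, so the helper short-circuits to a single page, and the selection criterion is now absolute bytes wasted per zspage rather than the old used-percentage score.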
/**
* zs_create_pool - Creates an allocation pool to work from.
* @name: pool name to be created
@@ -2448,7 +2426,7 @@ struct zs_pool *zs_create_pool(const char *name)
size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
if (size > ZS_MAX_ALLOC_SIZE)
size = ZS_MAX_ALLOC_SIZE;
- pages_per_zspage = get_pages_per_zspage(size);
+ pages_per_zspage = calculate_zspage_chain_size(size);
objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
/*
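Carrying the running example through this last hunk, still assuming 4 KiB pages: the 1536-byte class gets pages_per_zspage = 3, so objs_per_zspage = 3 * 4096 / 1536 = 8, while the 2560-byte class can now get pages_per_zspage = 5 with objs_per_zspage = 5 * 4096 / 2560 = 8 and zero tail waste.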