author     Dennis Zhou (Facebook) <dennisszhou@gmail.com>  2017-07-25 01:02:14 +0200
committer  Tejun Heo <tj@kernel.org>  2017-07-26 23:41:05 +0200
commit     13f966373f9296c0da2fb2764654cce520b3a6b4 (patch)
tree       fe1bee3a86577ecf25907f3586ae36cb338e0dcd /mm/percpu.c
parent     percpu: add first_bit to keep track of the first free in the bitmap (diff)
download   linux-13f966373f9296c0da2fb2764654cce520b3a6b4.tar.xz
           linux-13f966373f9296c0da2fb2764654cce520b3a6b4.zip
percpu: skip chunks if the alloc does not fit in the contig hint
This patch adds chunk->contig_bits_start to keep track of the contig hint's offset and the check to skip the chunk if it does not fit. If the chunk's contig hint starting offset cannot satisfy an allocation, the allocator assumes there is enough memory pressure in this chunk to either use a different chunk or create a new one.

This accepts a less tight packing for a smoother latency curve.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
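The core of the change is an early-exit fit check: before scanning a chunk's bitmap, the allocator asks whether the request could even fit in the largest known free area once the start of that area is rounded up to the required alignment. Below is a minimal userspace sketch of that check; the struct, field, and function names are simplified stand-ins (hypothetical, not the kernel's struct pcpu_chunk), and ALIGN() is a local re-implementation assuming a power-of-two alignment.

#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN(); assumes @a is a power of two. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical, simplified hint: just the two fields this commit cares about. */
struct chunk_hint {
	int contig_bits;	/* size of the largest known free area */
	int contig_bits_start;	/* offset of that area within the chunk */
};

/*
 * Mirror of the early-exit check added to pcpu_find_block_fit(): reject the
 * chunk when the alignment padding plus the request overflows the hint.
 */
static int hint_fits(const struct chunk_hint *hint, int alloc_bits, int align)
{
	int bit_off = ALIGN(hint->contig_bits_start, align) -
		      hint->contig_bits_start;

	return bit_off + alloc_bits <= hint->contig_bits;
}

int main(void)
{
	struct chunk_hint hint = { .contig_bits = 16, .contig_bits_start = 5 };

	printf("%d\n", hint_fits(&hint, 12, 4));	/* 3 + 12 <= 16 -> 1 */
	printf("%d\n", hint_fits(&hint, 14, 4));	/* 3 + 14 >  16 -> 0 */
	return 0;
}

Note the check is only as good as the hint: an allocation rejected here might still have fit in some other free region of the chunk, which is the deliberate packing-versus-latency trade-off described above.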
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  18
1 file changed, 16 insertions(+), 2 deletions(-)
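The first hunk of the diff below makes the hint self-describing by recording where the largest free area starts. As a hedged illustration (again using the simplified chunk_hint stand-in rather than the kernel's struct pcpu_chunk), the starting offset is only captured when a strictly larger free area is found, so contig_bits_start always refers to the same area as contig_bits:

#include <stdio.h>

/* Same hypothetical, simplified hint as in the sketch above. */
struct chunk_hint {
	int contig_bits;	/* size of the largest known free area */
	int contig_bits_start;	/* offset of that area within the chunk */
};

/* Mirror of the patched pcpu_chunk_update(): the offset moves with the size. */
static void hint_update(struct chunk_hint *hint, int bit_off, int bits)
{
	if (bits > hint->contig_bits) {
		hint->contig_bits_start = bit_off;
		hint->contig_bits = bits;
	}
}

int main(void)
{
	struct chunk_hint hint = { 0, 0 };

	hint_update(&hint, 10, 8);	/* first area seen becomes the hint */
	hint_update(&hint, 40, 6);	/* smaller area: hint unchanged */
	hint_update(&hint, 64, 32);	/* larger area: both fields move */

	printf("contig_bits=%d start=%d\n",
	       hint.contig_bits, hint.contig_bits_start);	/* 32, 64 */
	return 0;
}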
diff --git a/mm/percpu.c b/mm/percpu.c
index 83abb190ca5a..734745a0c9b6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -400,12 +400,14 @@ static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
* @bit_off: chunk offset
* @bits: size of free area
*
- * This updates the chunk's contig hint given a free area.
+ * This updates the chunk's contig hint and starting offset given a free area.
*/
static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
{
- if (bits > chunk->contig_bits)
+ if (bits > chunk->contig_bits) {
+ chunk->contig_bits_start = bit_off;
chunk->contig_bits = bits;
+ }
}
/**
@@ -416,6 +418,7 @@ static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
*
* Updates:
* chunk->contig_bits
+ * chunk->contig_bits_start
* nr_empty_pop_pages
*/
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
@@ -646,6 +649,17 @@ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
int bit_off, bits;
int re; /* region end */
+ /*
+ * Check to see if the allocation can fit in the chunk's contig hint.
+ * This is an optimization to prevent scanning by assuming if it
+ * cannot fit in the global hint, there is memory pressure and creating
+ * a new chunk would happen soon.
+ */
+ bit_off = ALIGN(chunk->contig_bits_start, align) -
+ chunk->contig_bits_start;
+ if (bit_off + alloc_bits > chunk->contig_bits)
+ return -1;
+
pcpu_for_each_unpop_region(chunk->alloc_map, bit_off, re,
chunk->first_bit,
pcpu_chunk_map_bits(chunk)) {